From 5ae51837febe5b98e52f54378fdcba0ee7e29546 Mon Sep 17 00:00:00 2001 From: Brandon Wu Date: Thu, 1 Aug 2024 12:27:27 +0800 Subject: [PATCH 01/11] [RISCV][llvm] Support Zvfbfa codegen and vsetvli insertion spec: https://github.com/aswaterman/riscv-misc/blob/main/isa/zvfbfa.adoc --- .../Target/RISCV/MCTargetDesc/RISCVBaseInfo.h | 11 + llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 31 +- llvm/lib/Target/RISCV/RISCVInstrFormats.td | 6 + .../Target/RISCV/RISCVInstrInfoVPseudos.td | 24 +- llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td | 676 +++++++++++++++ llvm/lib/Target/RISCV/RISCVSubtarget.h | 3 +- .../RISCV/rvv/mixed-float-bf16-arith.ll | 186 +++++ llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll | 607 ++++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll | 294 +++++++ llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll | 506 ++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll | 506 ++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll | 571 +++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll | 258 ++++++ llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll | 571 +++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll | 506 ++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll | 506 ++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll | 607 ++++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll | 88 ++ llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll | 161 ++++ llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll | 216 +++++ .../test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll | 226 +++++ .../test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll | 270 ++++++ .../CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll | 270 ++++++ llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll | 288 +++++++ llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll | 288 +++++++ llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll | 506 ++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll | 506 ++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll | 506 ++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll | 506 ++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll | 282 +++++++ 
llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll | 264 ++++++ llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll | 282 +++++++ llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll | 571 +++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll | 571 +++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll | 571 +++++++++++++ .../test/CodeGen/RISCV/rvv/vfslide1down-bf.ll | 288 +++++++ llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll | 294 +++++++ llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll | 559 +++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll | 519 ++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll | 773 ++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll | 264 ++++++ llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll | 264 ++++++ llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll | 506 ++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll | 519 ++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll | 506 ++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll | 506 ++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll | 519 ++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll | 773 ++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll | 496 +++++++++++ llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll | 496 +++++++++++ llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll | 496 +++++++++++ llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll | 496 +++++++++++ llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll | 496 +++++++++++ llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll | 496 +++++++++++ 54 files changed, 21489 insertions(+), 18 deletions(-) create mode 100644 llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll create mode 
100644 llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll create mode 100644 
llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h index 70b7c430c410e..649fdb0791664 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h @@ -142,6 +142,12 @@ enum { ReadsPastVLShift = DestEEWShift + 2, ReadsPastVLMask = 1ULL << ReadsPastVLShift, + + // 0 -> Don't care about altfmt bit in VTYPE. + // 1 -> Is not altfmt. + // 2 -> Is altfmt(BF16). + AltFmtTypeShift = ReadsPastVLShift + 1, + AltFmtTypeMask = 3ULL << AltFmtTypeShift, }; // Helper functions to read TSFlags. @@ -183,6 +189,11 @@ static inline bool hasRoundModeOp(uint64_t TSFlags) { return TSFlags & HasRoundModeOpMask; } +enum class AltFmtType { DontCare, IsNotAltFmt, IsAltFmt }; +static inline AltFmtType getAltFmtType(uint64_t TSFlags) { + return static_cast<AltFmtType>((TSFlags & AltFmtTypeMask) >> AltFmtTypeShift); +} + /// \returns true if this instruction uses vxrm static inline bool usesVXRM(uint64_t TSFlags) { return TSFlags & UsesVXRMMask; } diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp index 90e1c47a71c89..40b5e27e32384 100644 --- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp @@ -164,6 +164,7 @@ struct DemandedFields { // If this is true, we demand that VTYPE is set to some legal state, i.e. that // vill is unset.
bool VILL = false; + bool UseAltFmt = false; // Return true if any part of VTYPE was used bool usedVTYPE() const { @@ -183,6 +184,7 @@ struct DemandedFields { TailPolicy = true; MaskPolicy = true; VILL = true; + UseAltFmt = true; } // Mark all VL properties as demanded @@ -208,6 +210,7 @@ struct DemandedFields { TailPolicy |= B.TailPolicy; MaskPolicy |= B.MaskPolicy; VILL |= B.VILL; + UseAltFmt |= B.UseAltFmt; } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) @@ -324,6 +327,9 @@ static bool areCompatibleVTYPEs(uint64_t CurVType, uint64_t NewVType, if (Used.MaskPolicy && RISCVVType::isMaskAgnostic(CurVType) != RISCVVType::isMaskAgnostic(NewVType)) return false; + if (Used.UseAltFmt == true && + RISCVVType::isAltFmt(CurVType) != RISCVVType::isAltFmt(NewVType)) + return false; return true; } @@ -475,6 +481,9 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) { Res.TailPolicy = false; } + Res.UseAltFmt = RISCVII::getAltFmtType(MI.getDesc().TSFlags) != + RISCVII::AltFmtType::DontCare; + return Res; } @@ -506,6 +515,7 @@ class VSETVLIInfo { uint8_t TailAgnostic : 1; uint8_t MaskAgnostic : 1; uint8_t SEWLMULRatioOnly : 1; + uint8_t AltFmt : 1; public: VSETVLIInfo() @@ -582,6 +592,7 @@ class VSETVLIInfo { RISCVVType::VLMUL getVLMUL() const { return VLMul; } bool getTailAgnostic() const { return TailAgnostic; } bool getMaskAgnostic() const { return MaskAgnostic; } + bool getAltFmt() const { return AltFmt; } bool hasNonZeroAVL(const LiveIntervals *LIS) const { if (hasAVLImm()) @@ -643,14 +654,16 @@ class VSETVLIInfo { SEW = RISCVVType::getSEW(VType); TailAgnostic = RISCVVType::isTailAgnostic(VType); MaskAgnostic = RISCVVType::isMaskAgnostic(VType); + AltFmt = RISCVVType::isAltFmt(VType); } - void setVTYPE(RISCVVType::VLMUL L, unsigned S, bool TA, bool MA) { + void setVTYPE(RISCVVType::VLMUL L, unsigned S, bool TA, bool MA, bool AF) { assert(isValid() && !isUnknown() && "Can't set VTYPE for uninitialized or unknown"); VLMul = L; SEW = S; 
TailAgnostic = TA; MaskAgnostic = MA; + AltFmt = AF; } void setVLMul(RISCVVType::VLMUL VLMul) { this->VLMul = VLMul; } @@ -658,7 +671,7 @@ class VSETVLIInfo { unsigned encodeVTYPE() const { assert(isValid() && !isUnknown() && !SEWLMULRatioOnly && "Can't encode VTYPE for uninitialized or unknown"); - return RISCVVType::encodeVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic); + return RISCVVType::encodeVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic, AltFmt); } bool hasSEWLMULRatioOnly() const { return SEWLMULRatioOnly; } @@ -670,9 +683,9 @@ class VSETVLIInfo { "Can't compare VTYPE in unknown state"); assert(!SEWLMULRatioOnly && !Other.SEWLMULRatioOnly && "Can't compare when only LMUL/SEW ratio is valid."); - return std::tie(VLMul, SEW, TailAgnostic, MaskAgnostic) == + return std::tie(VLMul, SEW, TailAgnostic, MaskAgnostic, AltFmt) == std::tie(Other.VLMul, Other.SEW, Other.TailAgnostic, - Other.MaskAgnostic); + Other.MaskAgnostic, Other.AltFmt); } unsigned getSEWLMULRatio() const { @@ -1001,6 +1014,8 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const { RISCVVType::VLMUL VLMul = RISCVII::getLMul(TSFlags); + bool AltFmt = + RISCVII::getAltFmtType(TSFlags) == RISCVII::AltFmtType::IsAltFmt; unsigned Log2SEW = MI.getOperand(getSEWOpNum(MI)).getImm(); // A Log2SEW of 0 is an operation on mask registers only. unsigned SEW = Log2SEW ? 1 << Log2SEW : 8; @@ -1041,7 +1056,7 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const { assert(SEW == EEW && "Initial SEW doesn't match expected EEW"); } #endif - InstrInfo.setVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic); + InstrInfo.setVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic, AltFmt); forwardVSETVLIAVL(InstrInfo); @@ -1194,7 +1209,8 @@ void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info, // be coalesced into another vsetvli since we won't demand any fields. 
VSETVLIInfo NewInfo; // Need a new VSETVLIInfo to clear SEWLMULRatioOnly NewInfo.setAVLImm(1); - NewInfo.setVTYPE(RISCVVType::LMUL_1, /*sew*/ 8, /*ta*/ true, /*ma*/ true); + NewInfo.setVTYPE(RISCVVType::LMUL_1, /*sew*/ 8, /*ta*/ true, /*ma*/ true, + /*AF*/ false); Info = NewInfo; return; } @@ -1236,7 +1252,8 @@ void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info, (Demanded.TailPolicy ? IncomingInfo : Info).getTailAgnostic() || IncomingInfo.getTailAgnostic(), (Demanded.MaskPolicy ? IncomingInfo : Info).getMaskAgnostic() || - IncomingInfo.getMaskAgnostic()); + IncomingInfo.getMaskAgnostic(), + Demanded.UseAltFmt ? IncomingInfo.getAltFmt() : 0); // If we only knew the sew/lmul ratio previously, replace the VTYPE but keep // the AVL. diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td index 2afd77a96373b..7b262f2950ba5 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td +++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td @@ -267,6 +267,12 @@ class RVInstCommon Don't care about altfmt bit in VTYPE. + // 1 -> Is not altfmt. + // 2 -> Is altfmt(BF16). 
+ bits<2> AltFmtType = 0; + let TSFlags{28-27} = AltFmtType; } class RVInst { ["_M4", ""], ["_M8", ""], ["_SE", ""], - ["_RM", ""] + ["_RM", ""], + ["_ALT", ""] ]; string VInst = !foldl(PseudoInst, AffixSubsts, Acc, AffixSubst, !subst(AffixSubst[0], AffixSubst[1], Acc)); @@ -364,7 +368,7 @@ defset list AllVectors = { def VF16M4: GroupVTypeInfo; def VF16M8: GroupVTypeInfo; + V_M8, f16, FPR16>; def VF32M2: GroupVTypeInfo; @@ -382,16 +386,16 @@ defset list AllVectors = { } } - defset list AllBFloatVectors = { - defset list NoGroupBFloatVectors = { - defset list FractionalGroupBFloatVectors = { + defset list AllBF16Vectors = { + defset list NoGroupBF16Vectors = { + defset list FractionalGroupBF16Vectors = { def VBF16MF4: VTypeInfo; def VBF16MF2: VTypeInfo; } def VBF16M1: VTypeInfo; } - defset list GroupBFloatVectors = { + defset list GroupBF16Vectors = { def VBF16M2: GroupVTypeInfo; def VBF16M4: GroupVTypeInfo; defm PseudoVFSLIDE1DOWN : VPseudoVSLD1_VF; } // Predicates = [HasVInstructionsAnyF] diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td index 6d8672b72a12d..29a4c46e212ad 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td @@ -44,6 +44,337 @@ let Predicates = [HasStdExtZvfbfminOrZvfofp8min] in { let mayRaiseFPException = true, Predicates = [HasStdExtZvfbfwma] in defm PseudoVFWMACCBF16 : VPseudoVWMAC_VV_VF_BF_RM; +defset list AllWidenableIntToBFloatVectors = { + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; +} + +multiclass VPseudoVALU_VV_VF_RM_BF16 { + foreach m = MxListF in { + defm "" : VPseudoBinaryFV_VV_RM, + SchedBinary<"WriteVFALUV", "ReadVFALUV", "ReadVFALUV", m.MX, + 16/*sew*/, forcePassthruRead=true>; + } + + defvar f = SCALAR_F16; + foreach m = f.MxList in { + defm "" : VPseudoBinaryV_VF_RM, + SchedBinary<"WriteVFALUF", "ReadVFALUV", 
"ReadVFALUF", m.MX, + f.SEW, forcePassthruRead=true>; + } +} + +multiclass VPseudoVALU_VF_RM_BF16 { + defvar f = SCALAR_F16; + foreach m = f.MxList in { + defm "" : VPseudoBinaryV_VF_RM, + SchedBinary<"WriteVFALUF", "ReadVFALUV", "ReadVFALUF", m.MX, + f.SEW, forcePassthruRead=true>; + } +} + +multiclass VPseudoVFWALU_VV_VF_RM_BF16 { + foreach m = MxListFW in { + defm "" : VPseudoBinaryW_VV_RM, + SchedBinary<"WriteVFWALUV", "ReadVFWALUV", "ReadVFWALUV", m.MX, + 16/*sew*/, forcePassthruRead=true>; + } + + defvar f = SCALAR_F16; + foreach m = f.MxListFW in { + defm "" : VPseudoBinaryW_VF_RM, + SchedBinary<"WriteVFWALUF", "ReadVFWALUV", "ReadVFWALUF", m.MX, + f.SEW, forcePassthruRead=true>; + } +} + +multiclass VPseudoVFWALU_WV_WF_RM_BF16 { + foreach m = MxListFW in { + defm "" : VPseudoBinaryW_WV_RM, + SchedBinary<"WriteVFWALUV", "ReadVFWALUV", "ReadVFWALUV", m.MX, + 16/*sew*/, forcePassthruRead=true>; + } + defvar f = SCALAR_F16; + foreach m = f.MxListFW in { + defm "" : VPseudoBinaryW_WF_RM, + SchedBinary<"WriteVFWALUF", "ReadVFWALUV", "ReadVFWALUF", m.MX, + f.SEW, forcePassthruRead=true>; + } +} + +multiclass VPseudoVFMUL_VV_VF_RM_BF16 { + foreach m = MxListF in { + defm "" : VPseudoBinaryFV_VV_RM, + SchedBinary<"WriteVFMulV", "ReadVFMulV", "ReadVFMulV", m.MX, + 16/*sew*/, forcePassthruRead=true>; + } + + defvar f = SCALAR_F16; + foreach m = f.MxList in { + defm "" : VPseudoBinaryV_VF_RM, + SchedBinary<"WriteVFMulF", "ReadVFMulV", "ReadVFMulF", m.MX, + f.SEW, forcePassthruRead=true>; + } +} + +multiclass VPseudoVWMUL_VV_VF_RM_BF16 { + foreach m = MxListFW in { + defm "" : VPseudoBinaryW_VV_RM, + SchedBinary<"WriteVFWMulV", "ReadVFWMulV", "ReadVFWMulV", m.MX, + 16/*sew*/, forcePassthruRead=true>; + } + + defvar f = SCALAR_F16; + foreach m = f.MxListFW in { + defm "" : VPseudoBinaryW_VF_RM, + SchedBinary<"WriteVFWMulF", "ReadVFWMulV", "ReadVFWMulF", m.MX, + f.SEW, forcePassthruRead=true>; + } +} + +multiclass VPseudoVMAC_VV_VF_AAXA_RM_BF16 { + foreach m = MxListF in 
{ + defm "" : VPseudoTernaryV_VV_AAXA_RM, + SchedTernary<"WriteVFMulAddV", "ReadVFMulAddV", "ReadVFMulAddV", + "ReadVFMulAddV", m.MX, 16/*sew*/>; + } + + defvar f = SCALAR_F16; + foreach m = f.MxList in { + defm "" : VPseudoTernaryV_VF_AAXA_RM, + SchedTernary<"WriteVFMulAddF", "ReadVFMulAddV", "ReadVFMulAddF", + "ReadVFMulAddV", m.MX, f.SEW>; + } +} + +multiclass VPseudoVWMAC_VV_VF_RM_BF16 { + foreach m = MxListFW in { + defm "" : VPseudoTernaryW_VV_RM, + SchedTernary<"WriteVFWMulAddV", "ReadVFWMulAddV", + "ReadVFWMulAddV", "ReadVFWMulAddV", m.MX, 16/*sew*/>; + } + + defvar f = SCALAR_F16; + foreach m = f.MxListFW in { + defm "" : VPseudoTernaryW_VF_RM, + SchedTernary<"WriteVFWMulAddF", "ReadVFWMulAddV", + "ReadVFWMulAddF", "ReadVFWMulAddV", m.MX, f.SEW>; + } +} + +multiclass VPseudoVRCP_V_BF16 { + foreach m = MxListF in { + defvar mx = m.MX; + let VLMul = m.value in { + def "_V_" # mx # "_E16" + : VPseudoUnaryNoMask, + SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, 16/*sew*/, + forcePassthruRead=true>; + def "_V_" # mx # "_E16_MASK" + : VPseudoUnaryMask, + RISCVMaskedPseudo, + SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, 16/*sew*/, + forcePassthruRead=true>; + } + } +} + +multiclass VPseudoVRCP_V_RM_BF16 { + foreach m = MxListF in { + defvar mx = m.MX; + let VLMul = m.value in { + def "_V_" # mx # "_E16" + : VPseudoUnaryNoMaskRoundingMode, + SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, 16/*sew*/, + forcePassthruRead=true>; + def "_V_" # mx # "_E16_MASK" + : VPseudoUnaryMaskRoundingMode, + RISCVMaskedPseudo, + SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, 16/*sew*/, + forcePassthruRead=true>; + } + } +} + +multiclass VPseudoVMAX_VV_VF_BF16 { + foreach m = MxListF in { + defm "" : VPseudoBinaryV_VV, + SchedBinary<"WriteVFMinMaxV", "ReadVFMinMaxV", "ReadVFMinMaxV", + m.MX, 16/*sew*/, forcePassthruRead=true>; + } + + defvar f = SCALAR_F16; + foreach m = f.MxList in { + defm "" : VPseudoBinaryV_VF, + SchedBinary<"WriteVFMinMaxF", "ReadVFMinMaxV", "ReadVFMinMaxF", + 
m.MX, f.SEW, forcePassthruRead=true>; + } +} + +multiclass VPseudoVSGNJ_VV_VF_BF16 { + foreach m = MxListF in { + defm "" : VPseudoBinaryV_VV, + SchedBinary<"WriteVFSgnjV", "ReadVFSgnjV", "ReadVFSgnjV", m.MX, + 16/*sew*/, forcePassthruRead=true>; + } + + defvar f = SCALAR_F16; + foreach m = f.MxList in { + defm "" : VPseudoBinaryV_VF, + SchedBinary<"WriteVFSgnjF", "ReadVFSgnjV", "ReadVFSgnjF", m.MX, + f.SEW, forcePassthruRead=true>; + } +} + +multiclass VPseudoVWCVTF_V_BF16 { + defvar constraint = "@earlyclobber $rd"; + foreach m = MxListW in + defm _V : VPseudoConversion, + SchedUnary<"WriteVFWCvtIToFV", "ReadVFWCvtIToFV", m.MX, 8/*sew*/, + forcePassthruRead=true>; +} + +multiclass VPseudoVWCVTD_V_BF16 { + defvar constraint = "@earlyclobber $rd"; + foreach m = MxListFW in + defm _V : VPseudoConversion, + SchedUnary<"WriteVFWCvtFToFV", "ReadVFWCvtFToFV", m.MX, 16/*sew*/, + forcePassthruRead=true>; +} + +multiclass VPseudoVNCVTD_W_BF16 { + defvar constraint = "@earlyclobber $rd"; + foreach m = MxListFW in + defm _W : VPseudoConversion, + SchedUnary<"WriteVFNCvtFToFV", "ReadVFNCvtFToFV", m.MX, 16/*sew*/, + forcePassthruRead=true>; +} + +multiclass VPseudoVNCVTD_W_RM_BF16 { + defvar constraint = "@earlyclobber $rd"; + foreach m = MxListFW in + defm _W : VPseudoConversionRoundingMode, + SchedUnary<"WriteVFNCvtFToFV", "ReadVFNCvtFToFV", m.MX, 16/*sew*/, + forcePassthruRead=true>; +} + +let Predicates = [HasStdExtZvfbfa], AltFmtType = IS_ALTFMT in { +let mayRaiseFPException = true in { +defm PseudoVFADD_ALT : VPseudoVALU_VV_VF_RM_BF16; +defm PseudoVFSUB_ALT : VPseudoVALU_VV_VF_RM_BF16; +defm PseudoVFRSUB_ALT : VPseudoVALU_VF_RM_BF16; +} + +let mayRaiseFPException = true in { +defm PseudoVFWADD_ALT : VPseudoVFWALU_VV_VF_RM_BF16; +defm PseudoVFWSUB_ALT : VPseudoVFWALU_VV_VF_RM_BF16; +defm PseudoVFWADD_ALT : VPseudoVFWALU_WV_WF_RM_BF16; +defm PseudoVFWSUB_ALT : VPseudoVFWALU_WV_WF_RM_BF16; +} + +let mayRaiseFPException = true in +defm PseudoVFMUL_ALT : 
VPseudoVFMUL_VV_VF_RM_BF16; + +let mayRaiseFPException = true in +defm PseudoVFWMUL_ALT : VPseudoVWMUL_VV_VF_RM_BF16; + +let mayRaiseFPException = true in { +defm PseudoVFMACC_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16; +defm PseudoVFNMACC_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16; +defm PseudoVFMSAC_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16; +defm PseudoVFNMSAC_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16; +defm PseudoVFMADD_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16; +defm PseudoVFNMADD_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16; +defm PseudoVFMSUB_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16; +defm PseudoVFNMSUB_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16; +} + +let mayRaiseFPException = true in { +defm PseudoVFWMACC_ALT : VPseudoVWMAC_VV_VF_RM_BF16; +defm PseudoVFWNMACC_ALT : VPseudoVWMAC_VV_VF_RM_BF16; +defm PseudoVFWMSAC_ALT : VPseudoVWMAC_VV_VF_RM_BF16; +defm PseudoVFWNMSAC_ALT : VPseudoVWMAC_VV_VF_RM_BF16; +} + +let mayRaiseFPException = true in +defm PseudoVFRSQRT7_ALT : VPseudoVRCP_V_BF16; + +let mayRaiseFPException = true in +defm PseudoVFREC7_ALT : VPseudoVRCP_V_RM_BF16; + +let mayRaiseFPException = true in { +defm PseudoVFMIN_ALT : VPseudoVMAX_VV_VF_BF16; +defm PseudoVFMAX_ALT : VPseudoVMAX_VV_VF_BF16; +} + +defm PseudoVFSGNJ_ALT : VPseudoVSGNJ_VV_VF_BF16; +defm PseudoVFSGNJN_ALT : VPseudoVSGNJ_VV_VF_BF16; +defm PseudoVFSGNJX_ALT : VPseudoVSGNJ_VV_VF_BF16; + +let mayRaiseFPException = true in { +defm PseudoVMFEQ_ALT : VPseudoVCMPM_VV_VF; +defm PseudoVMFNE_ALT : VPseudoVCMPM_VV_VF; +defm PseudoVMFLT_ALT : VPseudoVCMPM_VV_VF; +defm PseudoVMFLE_ALT : VPseudoVCMPM_VV_VF; +defm PseudoVMFGT_ALT : VPseudoVCMPM_VF; +defm PseudoVMFGE_ALT : VPseudoVCMPM_VF; +} + +defm PseudoVFCLASS_ALT : VPseudoVCLS_V; + +defm PseudoVFMERGE_ALT : VPseudoVMRG_FM; + +defm PseudoVFMV_V_ALT : VPseudoVMV_F; + +let mayRaiseFPException = true in { +defm PseudoVFWCVT_F_XU_ALT : VPseudoVWCVTF_V_BF16; +defm PseudoVFWCVT_F_X_ALT : VPseudoVWCVTF_V_BF16; + +defm PseudoVFWCVT_F_F_ALT : VPseudoVWCVTD_V_BF16; +} // mayRaiseFPException = true + 
+let mayRaiseFPException = true in { +let hasSideEffects = 0, hasPostISelHook = 1 in { +defm PseudoVFNCVT_XU_F_ALT : VPseudoVNCVTI_W_RM; +defm PseudoVFNCVT_X_F_ALT : VPseudoVNCVTI_W_RM; +} + +defm PseudoVFNCVT_RTZ_XU_F_ALT : VPseudoVNCVTI_W; +defm PseudoVFNCVT_RTZ_X_F_ALT : VPseudoVNCVTI_W; + +defm PseudoVFNCVT_F_F_ALT : VPseudoVNCVTD_W_RM_BF16; + +defm PseudoVFNCVT_ROD_F_F_ALT : VPseudoVNCVTD_W_BF16; +} // mayRaiseFPException = true + +let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in { + defvar f = SCALAR_F16; + let HasSEWOp = 1, BaseInstr = VFMV_F_S in + def "PseudoVFMV_" # f.FX # "_S_ALT" : + RISCVVPseudo<(outs f.fprclass:$rd), (ins VR:$rs2, sew:$sew)>, + Sched<[WriteVMovFS, ReadVMovFS]>; + let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F, isReMaterializable = 1, + Constraints = "$rd = $passthru" in + def "PseudoVFMV_S_" # f.FX # "_ALT": + RISCVVPseudo<(outs VR:$rd), + (ins VR:$passthru, f.fprclass:$rs1, AVL:$vl, sew:$sew), + []>, + Sched<[WriteVMovSF, ReadVMovSF_V, ReadVMovSF_F]>; +} + +defm PseudoVFSLIDE1UP_ALT : VPseudoVSLD1_VF<"@earlyclobber $rd">; +defm PseudoVFSLIDE1DOWN_ALT : VPseudoVSLD1_VF; +} // Predicates = [HasStdExtZvfbfa], AltFmtType = IS_ALTFMT + //===----------------------------------------------------------------------===// // Patterns //===----------------------------------------------------------------------===// @@ -87,6 +418,130 @@ let Predicates = [HasStdExtZvfbfminOrZvfofp8min] in { FRM_DYN, fvti.AVL, fvti.Log2SEW, TA_MA)>; } + + defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllBF16Vectors>; + defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER", + AllBF16Vectors, uimm5>; + defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16", + eew=16, vtilist=AllBF16Vectors>; + defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllBF16Vectors, uimm5>; + defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllBF16Vectors, uimm5>; + + foreach fvti = 
AllBF16Vectors in { + defm : VPatBinaryCarryInTAIL<"int_riscv_vmerge", "PseudoVMERGE", "VVM", + fvti.Vector, + fvti.Vector, fvti.Vector, fvti.Mask, + fvti.Log2SEW, fvti.LMul, fvti.RegClass, + fvti.RegClass, fvti.RegClass>; + defm : VPatBinaryCarryInTAIL<"int_riscv_vfmerge", "PseudoVFMERGE", + "V"#fvti.ScalarSuffix#"M", + fvti.Vector, + fvti.Vector, fvti.Scalar, fvti.Mask, + fvti.Log2SEW, fvti.LMul, fvti.RegClass, + fvti.RegClass, fvti.ScalarRegClass>; + defvar instr = !cast("PseudoVMERGE_VIM_"#fvti.LMul.MX); + def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$passthru), + (fvti.Vector fvti.RegClass:$rs2), + (fvti.Scalar (fpimm0)), + (fvti.Mask VMV0:$vm), VLOpFrag)), + (instr fvti.RegClass:$passthru, fvti.RegClass:$rs2, 0, + (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>; + + defvar ivti = GetIntVTypeInfo.Vti; + def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), fvti.RegClass:$rs1, + fvti.RegClass:$rs2)), + (!cast("PseudoVMERGE_VVM_"#fvti.LMul.MX) + (fvti.Vector (IMPLICIT_DEF)), + fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask VMV0:$vm), + fvti.AVL, fvti.Log2SEW)>; + + def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), + (SplatFPOp (SelectScalarFPAsInt (XLenVT GPR:$imm))), + fvti.RegClass:$rs2)), + (!cast("PseudoVMERGE_VXM_"#fvti.LMul.MX) + (fvti.Vector (IMPLICIT_DEF)), + fvti.RegClass:$rs2, GPR:$imm, (fvti.Mask VMV0:$vm), fvti.AVL, fvti.Log2SEW)>; + + def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), + (SplatFPOp (fvti.Scalar fpimm0)), + fvti.RegClass:$rs2)), + (!cast("PseudoVMERGE_VIM_"#fvti.LMul.MX) + (fvti.Vector (IMPLICIT_DEF)), + fvti.RegClass:$rs2, 0, (fvti.Mask VMV0:$vm), fvti.AVL, fvti.Log2SEW)>; + + def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), + (SplatFPOp fvti.ScalarRegClass:$rs1), + fvti.RegClass:$rs2)), + (!cast("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX) + (fvti.Vector (IMPLICIT_DEF)), + fvti.RegClass:$rs2, + (fvti.Scalar fvti.ScalarRegClass:$rs1), + (fvti.Mask VMV0:$vm), fvti.AVL, fvti.Log2SEW)>; + 
+ def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm), + fvti.RegClass:$rs1, + fvti.RegClass:$rs2, + fvti.RegClass:$passthru, + VLOpFrag)), + (!cast("PseudoVMERGE_VVM_"#fvti.LMul.MX) + fvti.RegClass:$passthru, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask VMV0:$vm), + GPR:$vl, fvti.Log2SEW)>; + + def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm), + (SplatFPOp (SelectScalarFPAsInt (XLenVT GPR:$imm))), + fvti.RegClass:$rs2, + fvti.RegClass:$passthru, + VLOpFrag)), + (!cast("PseudoVMERGE_VXM_"#fvti.LMul.MX) + fvti.RegClass:$passthru, fvti.RegClass:$rs2, GPR:$imm, (fvti.Mask VMV0:$vm), + GPR:$vl, fvti.Log2SEW)>; + + + def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm), + (SplatFPOp (fvti.Scalar fpimm0)), + fvti.RegClass:$rs2, + fvti.RegClass:$passthru, + VLOpFrag)), + (!cast("PseudoVMERGE_VIM_"#fvti.LMul.MX) + fvti.RegClass:$passthru, fvti.RegClass:$rs2, 0, (fvti.Mask VMV0:$vm), + GPR:$vl, fvti.Log2SEW)>; + + def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm), + (SplatFPOp fvti.ScalarRegClass:$rs1), + fvti.RegClass:$rs2, + fvti.RegClass:$passthru, + VLOpFrag)), + (!cast("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX) + fvti.RegClass:$passthru, fvti.RegClass:$rs2, + (fvti.Scalar fvti.ScalarRegClass:$rs1), + (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>; + + def : Pat<(fvti.Vector + (riscv_vrgather_vv_vl fvti.RegClass:$rs2, + (ivti.Vector fvti.RegClass:$rs1), + fvti.RegClass:$passthru, + (fvti.Mask VMV0:$vm), + VLOpFrag)), + (!cast("PseudoVRGATHER_VV_"# fvti.LMul.MX#"_E"# fvti.SEW#"_MASK") + fvti.RegClass:$passthru, fvti.RegClass:$rs2, fvti.RegClass:$rs1, + (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; + def : Pat<(fvti.Vector (riscv_vrgather_vx_vl fvti.RegClass:$rs2, GPR:$rs1, + fvti.RegClass:$passthru, + (fvti.Mask VMV0:$vm), + VLOpFrag)), + (!cast("PseudoVRGATHER_VX_"# fvti.LMul.MX#"_MASK") + fvti.RegClass:$passthru, fvti.RegClass:$rs2, GPR:$rs1, + (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, 
TAIL_AGNOSTIC)>; + def : Pat<(fvti.Vector + (riscv_vrgather_vx_vl fvti.RegClass:$rs2, + uimm5:$imm, + fvti.RegClass:$passthru, + (fvti.Mask VMV0:$vm), + VLOpFrag)), + (!cast("PseudoVRGATHER_VI_"# fvti.LMul.MX#"_MASK") + fvti.RegClass:$passthru, fvti.RegClass:$rs2, uimm5:$imm, + (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; + } } let Predicates = [HasStdExtZvfbfwma] in { @@ -97,3 +552,224 @@ let Predicates = [HasStdExtZvfbfwma] in { defm : VPatWidenFPMulAccSDNode_VV_VF_RM<"PseudoVFWMACCBF16", AllWidenableBFloatToFloatVectors>; } + +multiclass VPatConversionVI_VF_BF16 { + foreach fvti = AllBF16Vectors in { + defvar ivti = GetIntVTypeInfo.Vti; + let Predicates = !listconcat(GetVTypePredicates.Predicates, + GetVTypePredicates.Predicates) in + defm : VPatConversion; + } +} + +multiclass VPatConversionWF_VI_BF16 { + foreach vtiToWti = AllWidenableIntToBFloatVectors in { + defvar vti = vtiToWti.Vti; + defvar fwti = vtiToWti.Wti; + let Predicates = !listconcat(GetVTypePredicates.Predicates, + GetVTypePredicates.Predicates) in + defm : VPatConversion; + } +} + +multiclass VPatConversionWF_VF_BF16 { + foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in { + defvar fvti = fvtiToFWti.Vti; + defvar fwti = fvtiToFWti.Wti; + let Predicates = !listconcat(GetVTypeMinimalPredicates.Predicates, + GetVTypeMinimalPredicates.Predicates) in + defm : VPatConversion; + } +} + +multiclass VPatConversionVI_WF_BF16 { + foreach vtiToWti = AllWidenableIntToBFloatVectors in { + defvar vti = vtiToWti.Vti; + defvar fwti = vtiToWti.Wti; + let Predicates = !listconcat(GetVTypePredicates.Predicates, + GetVTypePredicates.Predicates) in + defm : VPatConversion; + } +} + +multiclass VPatConversionVI_WF_RM_BF16 { + foreach vtiToWti = AllWidenableIntToBFloatVectors in { + defvar vti = vtiToWti.Vti; + defvar fwti = vtiToWti.Wti; + let Predicates = !listconcat(GetVTypePredicates.Predicates, + GetVTypePredicates.Predicates) in + defm : VPatConversionRoundingMode; + } +} + +multiclass 
VPatConversionVF_WF_BF16 { + foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in { + defvar fvti = fvtiToFWti.Vti; + defvar fwti = fvtiToFWti.Wti; + let Predicates = !listconcat(GetVTypePredicates.Predicates, + GetVTypePredicates.Predicates) in + defm : VPatConversion; + } +} + +let Predicates = [HasStdExtZvfbfa] in { +defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfadd", "PseudoVFADD_ALT", + AllBF16Vectors, isSEWAware = 1>; +defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfsub", "PseudoVFSUB_ALT", + AllBF16Vectors, isSEWAware = 1>; +defm : VPatBinaryV_VX_RM<"int_riscv_vfrsub", "PseudoVFRSUB_ALT", + AllBF16Vectors, isSEWAware = 1>; +defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwadd", "PseudoVFWADD_ALT", + AllWidenableBFloatToFloatVectors, isSEWAware=1>; +defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwsub", "PseudoVFWSUB_ALT", + AllWidenableBFloatToFloatVectors, isSEWAware=1>; +defm : VPatBinaryW_WV_WX_RM<"int_riscv_vfwadd_w", "PseudoVFWADD_ALT", + AllWidenableBFloatToFloatVectors, isSEWAware=1>; +defm : VPatBinaryW_WV_WX_RM<"int_riscv_vfwsub_w", "PseudoVFWSUB_ALT", + AllWidenableBFloatToFloatVectors, isSEWAware=1>; +defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfmul", "PseudoVFMUL_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwmul", "PseudoVFWMUL_ALT", + AllWidenableBFloatToFloatVectors, isSEWAware=1>; +defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmacc", "PseudoVFMACC_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmacc", "PseudoVFNMACC_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmsac", "PseudoVFMSAC_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmsac", "PseudoVFNMSAC_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmadd", "PseudoVFMADD_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmadd", "PseudoVFNMADD_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : 
VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmsub", "PseudoVFMSUB_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmsub", "PseudoVFNMSUB_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwmacc", "PseudoVFWMACC_ALT", + AllWidenableBFloatToFloatVectors, isSEWAware=1>; +defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwnmacc", "PseudoVFWNMACC_ALT", + AllWidenableBFloatToFloatVectors, isSEWAware=1>; +defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwmsac", "PseudoVFWMSAC_ALT", + AllWidenableBFloatToFloatVectors, isSEWAware=1>; +defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwnmsac", "PseudoVFWNMSAC_ALT", + AllWidenableBFloatToFloatVectors, isSEWAware=1>; +defm : VPatUnaryV_V<"int_riscv_vfrsqrt7", "PseudoVFRSQRT7_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatUnaryV_V_RM<"int_riscv_vfrec7", "PseudoVFREC7_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatBinaryV_VV_VX<"int_riscv_vfmin", "PseudoVFMIN_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatBinaryV_VV_VX<"int_riscv_vfmax", "PseudoVFMAX_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnj", "PseudoVFSGNJ_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjn", "PseudoVFSGNJN_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjx", "PseudoVFSGNJX_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatBinaryM_VV_VX<"int_riscv_vmfeq", "PseudoVMFEQ_ALT", AllBF16Vectors>; +defm : VPatBinaryM_VV_VX<"int_riscv_vmfle", "PseudoVMFLE_ALT", AllBF16Vectors>; +defm : VPatBinaryM_VV_VX<"int_riscv_vmflt", "PseudoVMFLT_ALT", AllBF16Vectors>; +defm : VPatBinaryM_VV_VX<"int_riscv_vmfne", "PseudoVMFNE_ALT", AllBF16Vectors>; +defm : VPatBinaryM_VX<"int_riscv_vmfgt", "PseudoVMFGT_ALT", AllBF16Vectors>; +defm : VPatBinaryM_VX<"int_riscv_vmfge", "PseudoVMFGE_ALT", AllBF16Vectors>; +defm : VPatBinarySwappedM_VV<"int_riscv_vmfgt", "PseudoVMFLT_ALT", AllBF16Vectors>; +defm : 
VPatBinarySwappedM_VV<"int_riscv_vmfge", "PseudoVMFLE_ALT", AllBF16Vectors>; +defm : VPatConversionVI_VF_BF16<"int_riscv_vfclass", "PseudoVFCLASS_ALT">; +foreach vti = AllBF16Vectors in { + let Predicates = GetVTypePredicates.Predicates in + defm : VPatBinaryCarryInTAIL<"int_riscv_vfmerge", "PseudoVFMERGE_ALT", + "V"#vti.ScalarSuffix#"M", + vti.Vector, + vti.Vector, vti.Scalar, vti.Mask, + vti.Log2SEW, vti.LMul, vti.RegClass, + vti.RegClass, vti.ScalarRegClass>; +} +defm : VPatConversionWF_VI_BF16<"int_riscv_vfwcvt_f_xu_v", "PseudoVFWCVT_F_XU_ALT", + isSEWAware=1>; +defm : VPatConversionWF_VI_BF16<"int_riscv_vfwcvt_f_x_v", "PseudoVFWCVT_F_X_ALT", + isSEWAware=1>; +defm : VPatConversionWF_VF_BF16<"int_riscv_vfwcvt_f_f_v", "PseudoVFWCVT_F_F_ALT", + isSEWAware=1>; +defm : VPatConversionVI_WF_RM_BF16<"int_riscv_vfncvt_xu_f_w", "PseudoVFNCVT_XU_F_ALT">; +defm : VPatConversionVI_WF_RM_BF16<"int_riscv_vfncvt_x_f_w", "PseudoVFNCVT_X_F_ALT">; +defm : VPatConversionVI_WF_BF16<"int_riscv_vfncvt_rtz_xu_f_w", "PseudoVFNCVT_RTZ_XU_F_ALT">; +defm : VPatConversionVI_WF_BF16<"int_riscv_vfncvt_rtz_x_f_w", "PseudoVFNCVT_RTZ_X_F_ALT">; +defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F_ALT", + AllWidenableBFloatToFloatVectors, isSEWAware=1>; +defm : VPatConversionVF_WF_BF16<"int_riscv_vfncvt_rod_f_f_w", "PseudoVFNCVT_ROD_F_F_ALT", + isSEWAware=1>; +defm : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP_ALT", AllBF16Vectors>; +defm : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN_ALT", AllBF16Vectors>; + +foreach fvti = AllBF16Vectors in { + defvar ivti = GetIntVTypeInfo.Vti; + let Predicates = GetVTypePredicates.Predicates in { + // 13.16. Vector Floating-Point Move Instruction + // If we're splatting fpimm0, use vmv.v.x vd, x0. 
+ def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl + fvti.Vector:$passthru, (fvti.Scalar (fpimm0)), VLOpFrag)), + (!cast("PseudoVMV_V_I_"#fvti.LMul.MX) + $passthru, 0, GPR:$vl, fvti.Log2SEW, TU_MU)>; + def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl + fvti.Vector:$passthru, (fvti.Scalar (SelectScalarFPAsInt (XLenVT GPR:$imm))), VLOpFrag)), + (!cast("PseudoVMV_V_X_"#fvti.LMul.MX) + $passthru, GPR:$imm, GPR:$vl, fvti.Log2SEW, TU_MU)>; + } + + let Predicates = GetVTypePredicates.Predicates in { + def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl + fvti.Vector:$passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)), + (!cast("PseudoVFMV_V_ALT_" # fvti.ScalarSuffix # "_" # + fvti.LMul.MX) + $passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), + GPR:$vl, fvti.Log2SEW, TU_MU)>; + } +} + +foreach vti = NoGroupBF16Vectors in { + let Predicates = GetVTypePredicates.Predicates in { + def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$passthru), + (vti.Scalar (fpimm0)), + VLOpFrag)), + (PseudoVMV_S_X $passthru, (XLenVT X0), GPR:$vl, vti.Log2SEW)>; + def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$passthru), + (vti.Scalar (SelectScalarFPAsInt (XLenVT GPR:$imm))), + VLOpFrag)), + (PseudoVMV_S_X $passthru, GPR:$imm, GPR:$vl, vti.Log2SEW)>; + def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$passthru), + vti.ScalarRegClass:$rs1, + VLOpFrag)), + (!cast("PseudoVFMV_S_"#vti.ScalarSuffix#"_ALT") + vti.RegClass:$passthru, + (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>; + } + + defvar vfmv_f_s_inst = !cast(!strconcat("PseudoVFMV_", + vti.ScalarSuffix, + "_S_ALT")); + // Only pattern-match extract-element operations where the index is 0. Any + // other index will have been custom-lowered to slide the vector correctly + // into place. 
+ let Predicates = GetVTypePredicates.Predicates in + def : Pat<(vti.Scalar (extractelt (vti.Vector vti.RegClass:$rs2), 0)), + (vfmv_f_s_inst vti.RegClass:$rs2, vti.Log2SEW)>; +} +} // Predicates = [HasStdExtZvfbfa] diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h index 7dffa63d85505..da666ca551fa5 100644 --- a/llvm/lib/Target/RISCV/RISCVSubtarget.h +++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h @@ -288,9 +288,10 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo { bool hasVInstructionsI64() const { return HasStdExtZve64x; } bool hasVInstructionsF16Minimal() const { return HasStdExtZvfhmin; } bool hasVInstructionsF16() const { return HasStdExtZvfh; } - bool hasVInstructionsBF16Minimal() const { return HasStdExtZvfbfmin; } + bool hasVInstructionsBF16Minimal() const { return HasStdExtZvfbfmin || HasStdExtZvfbfa; } bool hasVInstructionsF32() const { return HasStdExtZve32f; } bool hasVInstructionsF64() const { return HasStdExtZve64d; } + bool hasVInstructionsBF16() const { return HasStdExtZvfbfmin || HasStdExtZvfbfa; } // F16 and F64 both require F32. 
bool hasVInstructionsAnyF() const { return hasVInstructionsF32(); } bool hasVInstructionsFullMultiply() const { return HasStdExtV; } diff --git a/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll b/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll new file mode 100644 index 0000000000000..1498ac9ca48e9 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll @@ -0,0 +1,186 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( + , + , + , + iXLen, iXLen); + +declare @llvm.riscv.vadd.nxv1i32.nxv1i32( + , + , + , + iXLen); + +declare @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen); + +define @test_half_bf16( %0, %1, iXLen %2, %3, %4, ptr %ptr) nounwind { +; CHECK-LABEL: test_half_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a2, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v10, v10, v11 +; CHECK-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: fsrm a2 +; CHECK-NEXT: vse16.v v10, (a1) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv1f16.nxv1f16( + undef, + %3, + %4, + iXLen 0, iXLen %2) + + %b = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + call void @llvm.riscv.vse( %a, ptr %ptr, iXLen %2) + + ret %b +} + +define @test_i32_bf16( %0, %1, iXLen %2, %3, %4, ptr %ptr) nounwind { +; CHECK-LABEL: test_i32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vadd.vv v10, v10, v11 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma +; 
CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vse32.v v10, (a1) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i32.nxv1i32( + undef, + %3, + %4, + iXLen %2) + + %b = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + call void @llvm.riscv.vse( %a, ptr %ptr, iXLen %2) + + ret %b +} + +define @test_half_bf16_half( %0, %1, iXLen %2, %3, %4, ptr %ptr) nounwind { +; CHECK-LABEL: test_half_bf16_half: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a2, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v10, v10, v11 +; CHECK-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v9, v10, v11 +; CHECK-NEXT: fsrm a2 +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vse16.v v9, (a1) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv1f16.nxv1f16( + undef, + %3, + %4, + iXLen 0, iXLen %2) + + %b = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + %c = call @llvm.riscv.vfadd.nxv1f16.nxv1f16( + undef, + %a, + %4, + iXLen 0, iXLen %2) + + store %c, ptr %ptr + + ret %b +} + +define @test_bf16_half_bf16( %0, %1, iXLen %2, %3, %4, ptr %ptr) nounwind { +; CHECK-LABEL: test_bf16_half_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a2, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v10, v10, v11 +; CHECK-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: fsrm a2 +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vse16.v v10, (a1) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + %b = call @llvm.riscv.vfadd.nxv1f16.nxv1f16( + undef, + %3, + %4, + iXLen 0, iXLen %2) + + %c = 
call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( + undef, + %a, + %1, + iXLen 0, iXLen %2) + + store %b, ptr %ptr + + ret %c +} + +define @test_bf16_i16( %0, %1, iXLen %2, %3, %4, ptr %ptr) nounwind { +; CHECK-LABEL: test_bf16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a2, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: vadd.vv v9, v10, v11 +; CHECK-NEXT: fsrm a2 +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vse16.v v9, (a1) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + %b = call @llvm.riscv.vadd.nxv1i16.nxv1i16( + undef, + %3, + %4, + iXLen %2) + + store %b, ptr %ptr + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll new file mode 100644 index 0000000000000..7a9c6990b6137 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll @@ -0,0 +1,607 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfadd_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define 
@intrinsic_vfadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfadd_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv2bf16.nxv2bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfadd_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret 
+entry: + %a = call @llvm.riscv.vfadd.nxv4bf16.nxv4bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfadd_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv8bf16.nxv8bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfadd_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vfadd_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv16bf16.nxv16bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv32bf16.nxv32bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfadd_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv32bf16.nxv32bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv1bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv1bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv2bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv2bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vfadd_mask_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv4bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv4bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv8bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv8bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen 
%2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv16bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv16bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv32bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv32bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv32bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll new file mode 100644 index 0000000000000..c9e9d75e3e34a --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll @@ -0,0 +1,294 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfclass.nxv1i16.nxv1bf16( + , + , + iXLen); + +define @intrinsic_vfclass_v_nxv1i16_nxv1bf16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfclass.v v8, v8 +; CHECK-NEXT: ret + %0, + iXLen %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv1i16.nxv1bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv1i16.nxv1bf16( + , + , + , + iXLen, iXLen); + 
+define @intrinsic_vfclass_mask_v_nxv1i16_nxv1bf16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfclass.v v8, v9, v0.t +; CHECK-NEXT: ret + %0, + %1, + %2, + iXLen %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv1i16.nxv1bf16( + %0, + %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv2i16.nxv2bf16( + , + , + iXLen); + +define @intrinsic_vfclass_v_nxv2i16_nxv2bf16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv2i16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfclass.v v8, v8 +; CHECK-NEXT: ret + %0, + iXLen %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv2i16.nxv2bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv2i16.nxv2bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfclass_mask_v_nxv2i16_nxv2bf16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfclass.v v8, v9, v0.t +; CHECK-NEXT: ret + %0, + %1, + %2, + iXLen %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv2i16.nxv2bf16( + %0, + %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv4i16.nxv4bf16( + , + , + iXLen); + +define @intrinsic_vfclass_v_nxv4i16_nxv4bf16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv4i16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfclass.v v8, v8 +; CHECK-NEXT: ret + %0, + iXLen %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv4i16.nxv4bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv4i16.nxv4bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfclass_mask_v_nxv4i16_nxv4bf16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfclass.v v8, v9, v0.t +; CHECK-NEXT: ret + %0, + %1, + %2, + iXLen %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv4i16.nxv4bf16( + %0, + %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv8i16.nxv8bf16( + , + , + iXLen); + +define @intrinsic_vfclass_v_nxv8i16_nxv8bf16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv8i16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfclass.v v8, v8 +; CHECK-NEXT: ret + %0, + iXLen %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv8i16.nxv8bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv8i16.nxv8bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfclass_mask_v_nxv8i16_nxv8bf16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfclass.v v8, v10, v0.t +; CHECK-NEXT: ret + %0, + %1, + %2, + iXLen %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv8i16.nxv8bf16( + %0, + %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv16i16.nxv16bf16( + , + , + iXLen); + +define @intrinsic_vfclass_v_nxv16i16_nxv16bf16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv16i16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfclass.v v8, v8 +; CHECK-NEXT: ret + %0, + iXLen %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv16i16.nxv16bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv16i16.nxv16bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfclass_mask_v_nxv16i16_nxv16bf16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfclass.v v8, v12, v0.t +; CHECK-NEXT: ret + %0, + %1, + %2, + iXLen %3) nounwind { +entry: + 
%a = call @llvm.riscv.vfclass.mask.nxv16i16.nxv16bf16( + %0, + %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv32i16.nxv32bf16( + , + , + iXLen); + +define @intrinsic_vfclass_v_nxv32i16_nxv32bf16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv32i16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfclass.v v8, v8 +; CHECK-NEXT: ret + %0, + iXLen %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv32i16.nxv32bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv32i16.nxv32bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfclass_mask_v_nxv32i16_nxv32bf16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, tu, mu +; CHECK-NEXT: vfclass.v v8, v16, v0.t +; CHECK-NEXT: ret + %0, + %1, + %2, + iXLen %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv32i16.nxv32bf16( + %0, + %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll new file mode 100644 index 0000000000000..0937a82b48580 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll @@ -0,0 +1,506 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, 
tu, ma +; CHECK-NEXT: vfmacc.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfmacc.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16( + , + , + , + iXLen, 
iXLen, iXLen); + +define @intrinsic_vfmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfmacc.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfmacc.vv v8, v10, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: 
vfmacc.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfmacc.vv v8, v12, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfmacc.vv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv1bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfmacc.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv1bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, 
iXLen); + +define @intrinsic_vfmacc_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv2bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfmacc.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv2bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv4bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; 
CHECK-NEXT: vfmacc.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv4bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv8bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfmacc.vf v8, fa0, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv8bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv16bf16.bf16( + , + bfloat, + , + 
iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfmacc.vf v8, fa0, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv16bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll new file mode 100644 index 0000000000000..795fd4f56ec2c --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll @@ -0,0 +1,506 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfmadd.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfmadd.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare 
@llvm.riscv.vfmadd.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfmadd.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfmadd.vv v8, v10, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; 
CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfmadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfmadd.vv v8, v12, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfmadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv1bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmadd_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare 
@llvm.riscv.vfmadd.mask.nxv1bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmadd_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv2bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmadd_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv2bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmadd_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv4bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmadd_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv4bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmadd_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv8bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmadd_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfmadd.vf v8, fa0, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv8bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmadd_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 
0); + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv16bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmadd_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfmadd.vf v8, fa0, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv16bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmadd_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll new file mode 100644 index 0000000000000..b419c02b63384 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll @@ -0,0 +1,571 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfmax.nxv1bf16.nxv1bf16( + , + , + , + iXLen); + +define @intrinsic_vfmax_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vfmax_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv1bf16.nxv1bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv2bf16.nxv2bf16( + , + , + , + iXLen); + +define @intrinsic_vfmax_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmax_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv2bf16.nxv2bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv4bf16.nxv4bf16( + , + , + , + iXLen); + +define @intrinsic_vfmax_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vfmax_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv4bf16.nxv4bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv8bf16.nxv8bf16( + , + , + , + iXLen); + +define @intrinsic_vfmax_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmax_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv8bf16.nxv8bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfmax.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv16bf16.nxv16bf16( + , + , + , + iXLen); + +define @intrinsic_vfmax_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vfmax_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv16bf16.nxv16bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfmax.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv32bf16.nxv32bf16( + , + , + , + iXLen); + +define @intrinsic_vfmax_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmax_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv32bf16.nxv32bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu +; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv1bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmax_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen 
%2) nounwind { +; CHECK-LABEL: intrinsic_vfmax_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv1bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv2bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmax_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmax_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv2bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv4bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmax_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; 
CHECK-LABEL: intrinsic_vfmax_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv4bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv8bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmax_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmax_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv8bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v10, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv16bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmax_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vfmax_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv16bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfmax.vf v8, v12, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv32bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmax_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmax_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv32bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfmax.vf v8, v16, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv32bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll new file mode 100644 index 0000000000000..00a034f9b8595 --- /dev/null +++ 
b/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll @@ -0,0 +1,258 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfmerge.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vfmerge_vfm_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv1bf16.bf16( + undef, + %0, + bfloat %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vfmerge_vfm_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv2bf16.bf16( + undef, + %0, + bfloat %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vfmerge_vfm_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv4bf16.bf16( + undef, + %0, + bfloat %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv8bf16.bf16( + , + , + bfloat, + , + 
iXLen); + +define @intrinsic_vfmerge_vfm_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv8bf16.bf16( + undef, + %0, + bfloat %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vfmerge_vfm_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv16bf16.bf16( + undef, + %0, + bfloat %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vfmerge_vfm_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv32bf16.bf16( + undef, + %0, + bfloat %1, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vfmerge_vzm_nxv1bf16_nxv1bf16_bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x fa5, zero +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv1bf16.bf16( + undef, + %0, + bfloat zeroinitializer, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vfmerge_vzm_nxv2bf16_nxv2bf16_bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vfmerge_vzm_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x fa5, zero +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv2bf16.bf16( + undef, + %0, + bfloat zeroinitializer, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vfmerge_vzm_nxv4bf16_nxv4bf16_bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x fa5, zero +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv4bf16.bf16( + undef, + %0, + bfloat zeroinitializer, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vfmerge_vzm_nxv8bf16_nxv8bf16_bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x fa5, zero +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv8bf16.bf16( + undef, + %0, + bfloat zeroinitializer, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vfmerge_vzm_nxv16bf16_nxv16bf16_bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x fa5, zero +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv16bf16.bf16( + undef, + %0, + bfloat zeroinitializer, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vfmerge_vzm_nxv32bf16_nxv32bf16_bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x fa5, zero +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfmerge.vfm 
v8, v8, fa5, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv32bf16.bf16( + undef, + %0, + bfloat zeroinitializer, + %1, + iXLen %2) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll new file mode 100644 index 0000000000000..486d43b2e9d49 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll @@ -0,0 +1,571 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfmin.nxv1bf16.nxv1bf16( + , + , + , + iXLen); + +define @intrinsic_vfmin_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv1bf16.nxv1bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv2bf16.nxv2bf16( + , + , + , + iXLen); + +define @intrinsic_vfmin_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv2bf16.nxv2bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv4bf16.nxv4bf16( + , + , + , + iXLen); + +define @intrinsic_vfmin_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv4bf16.nxv4bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv8bf16.nxv8bf16( + , + , + , + iXLen); + +define @intrinsic_vfmin_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, 
e16alt, m2, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv8bf16.nxv8bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfmin.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv16bf16.nxv16bf16( + , + , + , + iXLen); + +define @intrinsic_vfmin_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv16bf16.nxv16bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfmin.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv32bf16.nxv32bf16( + , + , + , + iXLen); + +define @intrinsic_vfmin_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, 
ma +; CHECK-NEXT: vfmin.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv32bf16.nxv32bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu +; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv1bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmin_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv1bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv2bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmin_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, 
e16alt, mf2, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv2bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv4bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmin_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv4bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv8bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmin_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; 
CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv8bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v10, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv16bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmin_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv16bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfmin.vf v8, v12, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv32bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmin_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; 
CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv32bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfmin.vf v8, v16, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv32bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll new file mode 100644 index 0000000000000..ebeda9eaf42c2 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll @@ -0,0 +1,506 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfmsac.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define 
@intrinsic_vfmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfmsac.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfmsac.vv v8, 
v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfmsac.vv v8, v10, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfmsac.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define 
@intrinsic_vfmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfmsac.vv v8, v12, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfmsac.vv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv1bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfmsac.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv1bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: 
vfmsac.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv2bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfmsac.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv2bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv4bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfmsac.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv4bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + 
+define @intrinsic_vfmsac_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv8bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfmsac.vf v8, fa0, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv8bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv16bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; 
CHECK-NEXT: vfmsac.vf v8, fa0, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv16bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll new file mode 100644 index 0000000000000..b032c1d66f3b9 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll @@ -0,0 +1,506 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfmsub.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare 
@llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfmsub.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfmsub.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfmsub.vv v8, v10, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfmsub.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare 
@llvm.riscv.vfmsub.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfmsub.vv v8, v12, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfmsub.vv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv1bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv1bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv2bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv2bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv4bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + 
+declare @llvm.riscv.vfmsub.mask.nxv4bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv8bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfmsub.vf v8, fa0, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv8bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv16bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfmsub.vf v8, fa0, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv16bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll new file mode 100644 index 0000000000000..e1914f4c826a8 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll @@ -0,0 +1,607 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfmul.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfmul_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv1bf16.nxv1bf16( + undef, + %0, + %1, 
+ iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfmul_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv2bf16.nxv2bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfmul_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv4bf16.nxv4bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfmul_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv8bf16.nxv8bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv16bf16.nxv16bf16( + , + 
, + , + iXLen, iXLen); + +define @intrinsic_vfmul_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv16bf16.nxv16bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv32bf16.nxv32bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfmul_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv32bf16.nxv32bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vsetvli zero, a1, 
e16alt, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv1bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfmul_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv1bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv2bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfmul_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv2bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define 
@intrinsic_vfmul_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv4bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfmul_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv4bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv8bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfmul_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; 
CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv8bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v10, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv16bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfmul_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv16bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v12, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv32bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfmul_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, 
iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv32bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfmul.vf v8, v16, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv32bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll new file mode 100644 index 0000000000000..fbc7311945c8b --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll @@ -0,0 +1,88 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+d,+v,+experimental-zvfbfa -target-abi lp64d -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+d,+v,+experimental-zvfbfa -target-abi ilp32d -verify-machineinstrs < %s | FileCheck %s + +declare bfloat @llvm.riscv.vfmv.f.s.nxv1bf16() + +define bfloat @intrinsic_vfmv.f.s_s_nxv1bf16( %0) nounwind { +; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: fmv.h.x fa0, a0 +; CHECK-NEXT: ret +entry: + %a = call bfloat @llvm.riscv.vfmv.f.s.nxv1bf16( %0) + ret bfloat %a +} + +declare bfloat @llvm.riscv.vfmv.f.s.nxv2bf16() + +define 
bfloat @intrinsic_vfmv.f.s_s_nxv2bf16( %0) nounwind { +; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: fmv.h.x fa0, a0 +; CHECK-NEXT: ret +entry: + %a = call bfloat @llvm.riscv.vfmv.f.s.nxv2bf16( %0) + ret bfloat %a +} + +declare bfloat @llvm.riscv.vfmv.f.s.nxv4bf16() + +define bfloat @intrinsic_vfmv.f.s_s_nxv4bf16( %0) nounwind { +; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: fmv.h.x fa0, a0 +; CHECK-NEXT: ret +entry: + %a = call bfloat @llvm.riscv.vfmv.f.s.nxv4bf16( %0) + ret bfloat %a +} + +declare bfloat @llvm.riscv.vfmv.f.s.nxv8bf16() + +define bfloat @intrinsic_vfmv.f.s_s_nxv8bf16( %0) nounwind { +; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: fmv.h.x fa0, a0 +; CHECK-NEXT: ret +entry: + %a = call bfloat @llvm.riscv.vfmv.f.s.nxv8bf16( %0) + ret bfloat %a +} + +declare bfloat @llvm.riscv.vfmv.f.s.nxv16bf16() + +define bfloat @intrinsic_vfmv.f.s_s_nxv16bf16( %0) nounwind { +; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: fmv.h.x fa0, a0 +; CHECK-NEXT: ret +entry: + %a = call bfloat @llvm.riscv.vfmv.f.s.nxv16bf16( %0) + ret bfloat %a +} + +declare bfloat @llvm.riscv.vfmv.f.s.nxv32bf16() + +define bfloat @intrinsic_vfmv.f.s_s_nxv32bf16( %0) nounwind { +; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: fmv.h.x fa0, a0 +; CHECK-NEXT: ret +entry: + %a = call bfloat @llvm.riscv.vfmv.f.s.nxv32bf16( %0) + ret bfloat %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll 
b/llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll new file mode 100644 index 0000000000000..a810809fca515 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll @@ -0,0 +1,161 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vfmv.s.f.nxv1bf16(, bfloat, iXLen) + +define @intrinsic_vfmv.s.f_f_nxv1bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.s.f.nxv1bf16( %0, bfloat %1, iXLen %2) + ret %a +} + +declare @llvm.riscv.vfmv.s.f.nxv2bf16(, bfloat, iXLen) + +define @intrinsic_vfmv.s.f_f_nxv2bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.s.f.nxv2bf16( %0, bfloat %1, iXLen %2) + ret %a +} + +declare @llvm.riscv.vfmv.s.f.nxv4bf16(, bfloat, iXLen) + +define @intrinsic_vfmv.s.f_f_nxv4bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.s.f.nxv4bf16( %0, bfloat %1, iXLen %2) + ret %a +} + +declare @llvm.riscv.vfmv.s.f.nxv8bf16(, bfloat, iXLen) + +define @intrinsic_vfmv.s.f_f_nxv8bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8bf16: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.s.f.nxv8bf16( %0, bfloat %1, iXLen %2) + ret %a +} + +declare @llvm.riscv.vfmv.s.f.nxv16bf16(, bfloat, iXLen) + +define @intrinsic_vfmv.s.f_f_nxv16bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.s.f.nxv16bf16( %0, bfloat %1, iXLen %2) + ret %a +} + +declare @llvm.riscv.vfmv.s.f.nxv32bf16(, bfloat, iXLen) + +define @intrinsic_vfmv.s.f_f_nxv32bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.s.f.nxv32bf16( %0, bfloat %1, iXLen %2) + ret %a +} + +define @intrinsic_vfmv.s.f_f_zero_nxv1bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vmv.s.x v8, zero +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.s.f.nxv1bf16( %0, bfloat 0.0, iXLen %1) + ret %a +} + +define @intrinsic_vfmv.s.f_f_zero_nxv2bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vmv.s.x v8, zero +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.s.f.nxv2bf16( %0, bfloat 0.0, iXLen %1) + ret %a +} + +define @intrinsic_vfmv.s.f_f_zero_nxv4bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vmv.s.x v8, zero +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfmv.s.f.nxv4bf16( %0, bfloat 0.0, iXLen %1) + ret %a +} + +define @intrinsic_vfmv.s.f_f_zero_nxv8bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vmv.s.x v8, zero +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.s.f.nxv8bf16( %0, bfloat 0.0, iXLen %1) + ret %a +} + +define @intrinsic_vfmv.s.f_f_zero_nxv16bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vmv.s.x v8, zero +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.s.f.nxv16bf16( %0, bfloat 0.0, iXLen %1) + ret %a +} + +define @intrinsic_vfmv.s.f_f_zero_nxv32bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vmv.s.x v8, zero +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.s.f.nxv32bf16( %0, bfloat 0.0, iXLen %1) + ret %a +} + +define @intrinsic_vfmv.s.f_f_nxv1bf16_negzero( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1bf16_negzero: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lui a1, 1048568 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vmv.s.x v8, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.s.f.nxv1bf16( %0, bfloat -0.0, iXLen %1) + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll new file mode 100644 index 0000000000000..ceefe6b3aa83b --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll @@ -0,0 +1,216 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 
-mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfmv.v.f.nxv1bf16( + , + bfloat, + iXLen); + +define @intrinsic_vfmv.v.f_f_nxv1bf16(bfloat %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv1bf16( + undef, + bfloat %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv2bf16( + , + bfloat, + iXLen); + +define @intrinsic_vfmv.v.f_f_nxv2bf16(bfloat %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv2bf16( + undef, + bfloat %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv4bf16( + , + bfloat, + iXLen); + +define @intrinsic_vfmv.v.f_f_nxv4bf16(bfloat %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv4bf16( + undef, + bfloat %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv8bf16( + , + bfloat, + iXLen); + +define @intrinsic_vfmv.v.f_f_nxv8bf16(bfloat %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv8bf16( + undef, + bfloat %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv16bf16( + , + bfloat, + iXLen); + +define @intrinsic_vfmv.v.f_f_nxv16bf16(bfloat %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv16bf16( + undef, + bfloat %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv32bf16( + , + bfloat, + iXLen); + +define @intrinsic_vfmv.v.f_f_nxv32bf16(bfloat %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv32bf16( + undef, + bfloat %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfmv.v.f_zero_nxv1bf16(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv1bf16( + undef, + bfloat 0.0, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv.v.i_zero_nxv2bf16(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv2bf16( + undef, + bfloat 0.0, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv.v.i_zero_nxv4bf16(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv4bf16( + undef, + bfloat 0.0, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv.v.i_zero_nxv8bf16(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv8bf16( + undef, + bfloat 0.0, + iXLen %0) + + ret %a +} + +define 
@intrinsic_vmv.v.i_zero_nxv16bf16(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv16bf16( + undef, + bfloat 0.0, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv.v.i_zero_nxv32bf16(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv32bf16( + undef, + bfloat 0.0, + iXLen %0) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll new file mode 100644 index 0000000000000..0864e03d61f86 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll @@ -0,0 +1,226 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32( + , + , + iXLen); + +define @intrinsic_vfncvt_rod.f.f.w_nxv1bf16_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1bf16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1bf16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { +; 
CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1bf16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfncvt.rod.f.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32( + , + , + iXLen); + +define @intrinsic_vfncvt_rod.f.f.w_nxv2bf16_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2bf16_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2bf16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2bf16_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfncvt.rod.f.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32( + , + , + iXLen); + +define @intrinsic_vfncvt_rod.f.f.w_nxv4bf16_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4bf16_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32( + , + , + , + iXLen, + iXLen); + +define 
@intrinsic_vfncvt_mask_rod.f.f.w_nxv4bf16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4bf16_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfncvt.rod.f.f.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32( + , + , + iXLen); + +define @intrinsic_vfncvt_rod.f.f.w_nxv8bf16_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8bf16_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8bf16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8bf16_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfncvt.rod.f.f.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32( + , + , + iXLen); + +define @intrinsic_vfncvt_rod.f.f.w_nxv16bf16_nxv16f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv16bf16_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfncvt.rod.f.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare 
@llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv16bf16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv16bf16_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfncvt.rod.f.f.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll new file mode 100644 index 0000000000000..40e652948d1a5 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll @@ -0,0 +1,270 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma +; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu +; CHECK-NEXT: 
vfncvt.rtz.x.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma +; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu +; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma +; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, 
mu +; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma +; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu +; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma +; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli zero, a0, e8alt, m2, ta, mu +; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma +; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu +; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll new file mode 100644 index 0000000000000..1834f183e96d6 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll @@ -0,0 +1,270 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16( + , + , + iXLen); + +define 
@intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma +; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu +; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma +; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu +; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare 
@llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma +; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu +; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma +; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu +; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16( + %0, + %1, + %2, + 
iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma +; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, mu +; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma +; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu +; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll new file mode 100644 index 0000000000000..fe2120ac612a8 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll @@ -0,0 +1,288 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma +; CHECK-NEXT: vfncvt.x.f.w v9, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16( + undef, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: 
intrinsic_vfncvt_x.f.w_nxv2i8_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma +; CHECK-NEXT: vfncvt.x.f.w v9, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16( + undef, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i8_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma +; CHECK-NEXT: vfncvt.x.f.w v9, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16( + undef, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16( + %0, + %1, + %2, + iXLen 0, 
iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i8_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma +; CHECK-NEXT: vfncvt.x.f.w v10, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16( + undef, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i8_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma +; CHECK-NEXT: vfncvt.x.f.w v12, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16( + undef, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: 
vsetvli zero, a0, e8alt, m2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv32i8_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma +; CHECK-NEXT: vfncvt.x.f.w v16, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16( + undef, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll new file mode 100644 index 0000000000000..95ba5e94a9cd4 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll @@ -0,0 +1,288 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + 
+declare @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma +; CHECK-NEXT: vfncvt.xu.f.w v9, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16( + undef, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma +; CHECK-NEXT: vfncvt.xu.f.w v9, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16( + undef, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, 
mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma +; CHECK-NEXT: vfncvt.xu.f.w v9, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16( + undef, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma +; CHECK-NEXT: vfncvt.xu.f.w v10, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16( + undef, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define 
@intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma +; CHECK-NEXT: vfncvt.xu.f.w v12, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16( + undef, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma +; CHECK-NEXT: vfncvt.xu.f.w v16, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv.v.v 
v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16( + undef, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll new file mode 100644 index 0000000000000..4bf643003a38c --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll @@ -0,0 +1,506 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfnmacc.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define 
@intrinsic_vfnmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfnmacc.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; 
CHECK-NEXT: vfnmacc.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfnmacc.vv v8, v10, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfnmacc.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16( + , + , + 
, + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfnmacc.vv v8, v12, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfnmacc.vv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv1bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: 
vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv2bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv4bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare 
@llvm.riscv.vfnmacc.mask.nxv4bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv8bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfnmacc.vf v8, fa0, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv16bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16bf16_bf16_nxv16bf16: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfnmacc.vf v8, fa0, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll new file mode 100644 index 0000000000000..7dcaa1c24e6de --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll @@ -0,0 +1,506 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, 
iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfnmadd.vv v8, v10, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfnmadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: 
ret +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfnmadd.vv v8, v12, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfnmadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv1bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define 
@intrinsic_vfnmadd_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv2bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv4bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma 
+; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv8bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare 
@llvm.riscv.vfnmadd.nxv16bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll new file mode 100644 index 0000000000000..9528f80453398 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll @@ -0,0 +1,506 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfnmsac.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfnmsac.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfnmsac.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfnmsac.vv v8, v10, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) 
nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfnmsac.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfnmsac.vv v8, v12, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfnmsac.vv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv1bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; 
CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv2bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv4bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define 
@intrinsic_vfnmsac_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv8bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfnmsac.vf v8, fa0, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; 
CHECK-NEXT: vfnmsac.vf v8, fa0, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv16bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfnmsac.vf v8, fa0, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll new file mode 100644 index 0000000000000..dcbb9ced92db7 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll @@ -0,0 +1,506 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | 
FileCheck %s + +declare @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfnmsub.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfnmsub.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfnmsub.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: 
# %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfnmsub.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfnmsub.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfnmsub.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfnmsub.vv v8, v10, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret 
%a +} + +declare @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfnmsub.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfnmsub.vv v8, v12, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfnmsub.vv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsub.nxv1bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vfnmsub_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsub.nxv2bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfnmsub.mask.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsub.nxv4bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsub.nxv8bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, 
%2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsub.nxv16bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll new file mode 100644 index 0000000000000..23de020938f73 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll @@ -0,0 +1,282 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: 
-verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfrec7.nxv1bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfrec7_v_nxv1bf16_nxv1bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_v_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfrec7.v v8, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.nxv1bf16( + undef, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfrec7.mask.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfrec7_mask_v_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfrec7.v v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.mask.nxv1bf16( + %1, + %2, + %0, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrec7.nxv2bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfrec7_v_nxv2bf16_nxv2bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_v_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfrec7.v v8, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.nxv2bf16( + undef, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfrec7.mask.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfrec7_mask_v_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, 
e16alt, mf2, ta, mu +; CHECK-NEXT: vfrec7.v v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.mask.nxv2bf16( + %1, + %2, + %0, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrec7.nxv4bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfrec7_v_nxv4bf16_nxv4bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_v_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfrec7.v v8, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.nxv4bf16( + undef, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfrec7.mask.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfrec7_mask_v_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfrec7.v v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.mask.nxv4bf16( + %1, + %2, + %0, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrec7.nxv8bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfrec7_v_nxv8bf16_nxv8bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_v_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfrec7.v v8, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.nxv8bf16( + undef, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfrec7.mask.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfrec7_mask_v_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, 
a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfrec7.v v8, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.mask.nxv8bf16( + %1, + %2, + %0, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrec7.nxv16bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfrec7_v_nxv16bf16_nxv16bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_v_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfrec7.v v8, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.nxv16bf16( + undef, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfrec7.mask.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfrec7_mask_v_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfrec7.v v8, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.mask.nxv16bf16( + %1, + %2, + %0, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrec7.nxv32bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfrec7_v_nxv32bf16_nxv32bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_v_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfrec7.v v8, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.nxv32bf16( + undef, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfrec7.mask.nxv32bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfrec7_mask_v_nxv32bf16_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfrec7.v v8, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.mask.nxv32bf16( + %1, + %2, + %0, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll new file mode 100644 index 0000000000000..c08363a0d7bf5 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll @@ -0,0 +1,264 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfrsqrt7.nxv1bf16( + , + , + iXLen); + +define @intrinsic_vfrsqrt7_v_nxv1bf16_nxv1bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfrsqrt7.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsqrt7.nxv1bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfrsqrt7.mask.nxv1bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfrsqrt7_mask_v_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsqrt7.mask.nxv1bf16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrsqrt7.nxv2bf16( + , + , + iXLen); + +define @intrinsic_vfrsqrt7_v_nxv2bf16_nxv2bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfrsqrt7.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsqrt7.nxv2bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfrsqrt7.mask.nxv2bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfrsqrt7_mask_v_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsqrt7.mask.nxv2bf16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrsqrt7.nxv4bf16( + , + , + iXLen); + +define @intrinsic_vfrsqrt7_v_nxv4bf16_nxv4bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfrsqrt7.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsqrt7.nxv4bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfrsqrt7.mask.nxv4bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfrsqrt7_mask_v_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsqrt7.mask.nxv4bf16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrsqrt7.nxv8bf16( + , + , + iXLen); + +define @intrinsic_vfrsqrt7_v_nxv8bf16_nxv8bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfrsqrt7.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsqrt7.nxv8bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare 
@llvm.riscv.vfrsqrt7.mask.nxv8bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfrsqrt7_mask_v_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfrsqrt7.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsqrt7.mask.nxv8bf16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrsqrt7.nxv16bf16( + , + , + iXLen); + +define @intrinsic_vfrsqrt7_v_nxv16bf16_nxv16bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfrsqrt7.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsqrt7.nxv16bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfrsqrt7.mask.nxv16bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfrsqrt7_mask_v_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfrsqrt7.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsqrt7.mask.nxv16bf16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrsqrt7.nxv32bf16( + , + , + iXLen); + +define @intrinsic_vfrsqrt7_v_nxv32bf16_nxv32bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfrsqrt7.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsqrt7.nxv32bf16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfrsqrt7.mask.nxv32bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfrsqrt7_mask_v_nxv32bf16_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vfrsqrt7_mask_v_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfrsqrt7.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsqrt7.mask.nxv32bf16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll new file mode 100644 index 0000000000000..8ca926ac04b19 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll @@ -0,0 +1,282 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfrsub.nxv1bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfrsub_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsub.nxv1bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfrsub_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsub.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv2bf16.bf16( + , + , + bfloat, 
+ iXLen, iXLen); + +define @intrinsic_vfrsub_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsub.nxv2bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfrsub_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsub.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv4bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfrsub_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsub.nxv4bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfrsub_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsub.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare 
@llvm.riscv.vfrsub.nxv8bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfrsub_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsub.nxv8bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfrsub_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v10, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsub.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv16bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfrsub_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsub.nxv16bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfrsub_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v12, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsub.mask.nxv16bf16.bf16( + %0, + %1, + bfloat 
%2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv32bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfrsub_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsub.nxv32bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfrsub_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v16, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsub.mask.nxv32bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll new file mode 100644 index 0000000000000..7cd3b117f0cc9 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll @@ -0,0 +1,571 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnj_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, 
mf4, ta, ma +; CHECK-NEXT: vfsgnj.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnj_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfsgnj.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnj_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; 
CHECK-NEXT: vfsgnj.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnj_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfsgnj.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnj_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: 
vfsgnj.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnj_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfsgnj.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv1bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnj_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv1bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv2bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnj_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv2bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv4bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnj_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, 
a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv4bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv8bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnj_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv8bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v10, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv16bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnj_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, 
e16alt, m4, ta, ma +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv16bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v12, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv32bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnj_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv32bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v16, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll new file mode 100644 index 0000000000000..ea63a7d737086 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll @@ -0,0 +1,571 @@ +; NOTE: Assertions have been 
autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjn_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjn_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) 
nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjn_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjn_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) 
nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfsgnjn.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjn_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfsgnjn.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfsgnjn.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjn_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16( + , + , + , + , + iXLen, + iXLen); + +define 
@intrinsic_vfsgnjn_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu +; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv1bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjn_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv1bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv2bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjn_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv2bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare 
@llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv4bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjn_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv4bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv8bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjn_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv8bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} 
+ +declare @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v10, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv16bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjn_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv16bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v12, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv32bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjn_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv32bf16.bf16( + undef, + %0, + bfloat 
%1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v16, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll new file mode 100644 index 0000000000000..3ed8d13d8d17d --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll @@ -0,0 +1,571 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjx_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu 
+; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjx_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjx_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; 
CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjx_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfsgnjx.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfsgnjx.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjx_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfsgnjx.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, 
m4, ta, mu +; CHECK-NEXT: vfsgnjx.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjx_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfsgnjx.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu +; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv1bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjx_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv1bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv2bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjx_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv2bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv4bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjx_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv4bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vfsgnjx_mask_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv8bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjx_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv8bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v10, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv16bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjx_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv16bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) 
nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v12, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv32bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjx_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv32bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v16, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll new file mode 100644 index 0000000000000..aa4efcba59753 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll @@ -0,0 +1,288 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s 
+ +declare @llvm.riscv.vfslide1down.nxv1bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfslide1down_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1down.nxv1bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfslide1down_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv2bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfslide1down_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1down.nxv2bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfslide1down_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = 
call @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv4bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfslide1down_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1down.nxv4bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfslide1down_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv8bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfslide1down_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1down.nxv8bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfslide1down_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v10, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv16bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfslide1down_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1down.nxv16bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfslide1down_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v12, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv32bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfslide1down_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1down.nxv32bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfslide1down_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) 
nounwind { +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v16, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll new file mode 100644 index 0000000000000..baf9f5a15b08b --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll @@ -0,0 +1,294 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfslide1up.nxv1bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfslide1up_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1up.nxv1bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfslide1up_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16( + %0, + 
%1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv2bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfslide1up_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1up.nxv2bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfslide1up_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv4bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfslide1up_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1up.nxv4bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfslide1up_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; 
CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv8bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfslide1up_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfslide1up.vf v10, v8, fa0 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1up.nxv8bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfslide1up_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v10, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv16bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfslide1up_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfslide1up.vf v12, v8, fa0 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1up.nxv16bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfslide1up_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; 
CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v12, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv32bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfslide1up_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfslide1up.vf v16, v8, fa0 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1up.nxv32bf16.bf16( + undef, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfslide1up_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v16, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll new file mode 100644 index 0000000000000..2afb375149e49 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll @@ -0,0 +1,559 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: 
-verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfsub.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfsub_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv1bf16.nxv1bf16( + undef, + %0, + %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfsub_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv2bf16.nxv2bf16( + undef, + %0, + %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfsub_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv4bf16.nxv4bf16( + undef, + %0, + %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfsub_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv8bf16.nxv8bf16( + undef, + %0, + %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret 
+entry: + %a = call @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfsub_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv16bf16.nxv16bf16( + undef, + %0, + %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv32bf16.nxv32bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfsub_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv32bf16.nxv32bf16( + undef, + %0, + %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli 
zero, a1, e16alt, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16( + %0, + %1, + %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv1bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfsub_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv1bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv2bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfsub_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv2bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv4bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfsub_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv4bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv8bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfsub_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv8bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vfsub_mask_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v10, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv16bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfsub_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv16bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v12, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv32bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfsub_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv32bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, 
%1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfsub.vf v8, v16, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv32bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll new file mode 100644 index 0000000000000..bdffe3b2d61dc --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll @@ -0,0 +1,519 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwadd_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwadd.vv v10, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t +; 
CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwadd_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwadd.vv v10, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwadd_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v9 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwadd.vv v8, v11, v10 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare 
@llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwadd.vv v8, v10, v11, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwadd_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmv2r.v v12, v10 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwadd.vv v8, v14, v12 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwadd.vv v8, v12, v14, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwadd_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vfwadd_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v20, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwadd.vv v8, v20, v16 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwadd.vv v8, v16, v20, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwadd_vf_nxv1f32_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwadd.vf v9, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; 
CHECK-NEXT: vfwadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwadd_vf_nxv2f32_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwadd.vf v9, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwadd_vf_nxv4f32_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwadd.vf v8, v10, fa0 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare 
@llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v10, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwadd_vf_nxv8f32_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwadd.vf v8, v12, fa0 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v12, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwadd_vf_nxv16f32_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vfwadd_vf_nxv16f32_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwadd.vf v8, v16, fa0 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v16, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll new file mode 100644 index 0000000000000..dd345d0b0b786 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll @@ -0,0 +1,773 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwadd.wv v8, v8, v9 
+; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwadd.wv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwadd.wv v8, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwadd.wv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16( + , + , + , + iXLen, iXLen); + +define 
@intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfwadd.wv v8, v8, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwadd.wv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfwadd.wv v8, v8, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwadd.wv v8, v12, v16, v0.t +; CHECK-NEXT: 
fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfwadd.wv v8, v8, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl4re16.v v24, (a0) +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vsetvli zero, a1, e16alt, m4, ta, mu +; CHECK-NEXT: vfwadd.wv v8, v16, v24, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv1f32.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv1f32.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define 
@intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv2f32.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv2f32.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv4f32.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 +; 
CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv4f32.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v10, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv8f32.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv8f32.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v12, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv16f32.bf16( + , + , + bfloat, + iXLen, iXLen); + +define 
@intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv16f32.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v16, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwadd.wv v8, v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16( + %0, + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwadd.wv v8, v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16( + %0, + %0, + %1, + 
%2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwadd.wv v8, v8, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16( + %0, + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwadd.wv v8, v8, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16( + %0, + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwadd.wv v8, v8, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16( + %0, + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16( + %0, + %0, + bfloat 
%1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16( + %0, + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16( + %0, + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16( + %0, + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16( + %0, + %0, + 
bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwadd.wv v9, v9, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16( + undef, + %1, + %0, + iXLen 0, iXLen %2) + + ret %a +} + +define @intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwadd.wv v9, v9, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16( + undef, + %1, + %0, + iXLen 0, iXLen %2) + + ret %a +} + +define @intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfwadd.wv v10, v10, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16( + undef, + %1, + %0, + iXLen 0, iXLen %2) + + ret %a +} + +define @intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfwadd.wv v12, v12, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16( + undef, + 
%1, + %0, + iXLen 0, iXLen %2) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll new file mode 100644 index 0000000000000..ef7e695b292e9 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll @@ -0,0 +1,264 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv1bf16_nxv1i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1bf16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv1bf16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1bf16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv2bf16_nxv2i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2bf16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret 
+entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv2bf16_nxv2i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2bf16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv4bf16_nxv4i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4bf16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv4bf16_nxv4i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4bf16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv8bf16_nxv8i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8bf16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vfwcvt.f.x.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8( + 
undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv8bf16_nxv8i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8bf16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv16bf16_nxv16i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16bf16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vfwcvt.f.x.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv16bf16_nxv16i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16bf16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv32bf16_nxv32i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv32bf16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: vfwcvt.f.x.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8( + undef, + %0, + iXLen %1) + + ret %a +} 
+ +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv32bf16_nxv32i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv32bf16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll new file mode 100644 index 0000000000000..174bf7a710ea6 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll @@ -0,0 +1,264 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv1bf16_nxv1i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1bf16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv1bf16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1bf16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = 
call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv2bf16_nxv2i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2bf16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv2bf16_nxv2i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2bf16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv4bf16_nxv4i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4bf16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv4bf16_nxv4i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4bf16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv8bf16_nxv8i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8bf16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vfwcvt.f.xu.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv8bf16_nxv8i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8bf16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv16bf16_nxv16i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16bf16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vfwcvt.f.xu.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv16bf16_nxv16i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16bf16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv32bf16_nxv32i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv32bf16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: vfwcvt.f.xu.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv32bf16_nxv32i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv32bf16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll new file mode 100644 index 0000000000000..a3f667818ab0a --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll @@ -0,0 +1,506 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfwmsac.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfwmsac.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfwmsac.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfwmsac.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + 
+declare @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfwmsac.vv v8, v10, v11 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfwmsac.vv v8, v10, v11, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfwmsac.vv v8, v12, v14 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi 
a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfwmsac.vv v8, v12, v14, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfwmsac.vv v8, v16, v20 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfwmsac.vv v8, v16, v20, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv1f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.nxv1f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare 
@llvm.riscv.vfwmsac.mask.nxv1f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_mask_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv2f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.nxv2f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv2f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_mask_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv4f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfwmsac.vf v8, fa0, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.nxv4f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv4f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_mask_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv8f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfwmsac.vf v8, fa0, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.nxv8f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv8f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_mask_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, 
iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv16f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv16f32_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfwmsac.vf v8, fa0, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.nxv16f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv16f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_mask_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll new file mode 100644 index 0000000000000..7e4814e998c7e --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll @@ -0,0 +1,519 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwmul_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vfwmul_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwmul.vv v10, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwmul_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwmul.vv v10, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t +; 
CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwmul_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v9 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwmul.vv v8, v11, v10 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwmul.vv v8, v10, v11, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwmul_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmv2r.v v12, v10 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwmul.vv v8, v14, v12 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + 
+declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwmul.vv v8, v12, v14, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwmul_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v20, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwmul.vv v8, v20, v16 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwmul.vv v8, v16, v20, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwmul_vf_nxv1f32_nxv1bf16_bf16( %0, bfloat %1, iXLen 
%2) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwmul.vf v9, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwmul_vf_nxv2f32_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwmul.vf v9, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: 
vfwmul.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwmul_vf_nxv4f32_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwmul.vf v8, v10, fa0 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v10, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwmul_vf_nxv8f32_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwmul.vf v8, v12, fa0 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare 
@llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v12, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwmul_vf_nxv16f32_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwmul.vf v8, v16, fa0 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v16, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll new file mode 100644 index 0000000000000..1e05e4c7acf25 --- /dev/null +++ 
b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll @@ -0,0 +1,506 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfwnmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 +; CHECK-NEXT: 
fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfwnmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfwnmacc.vv v8, v10, v11 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfwnmacc.vv v8, v10, v11, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define 
@intrinsic_vfwnmacc_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfwnmacc.vv v8, v12, v14 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfwnmacc.vv v8, v12, v14, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfwnmacc.vv v8, v16, v20 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu 
+; CHECK-NEXT: vfwnmacc.vv v8, v16, v20, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv1f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.nxv1f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_mask_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv2f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.nxv2f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16( + , + 
bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_mask_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv4f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.nxv4f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_mask_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv8f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; 
CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.nxv8f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_mask_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv16f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv16f32_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.nxv16f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_mask_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, 
iXLen 0) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll new file mode 100644 index 0000000000000..223ad4f7483f6 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll @@ -0,0 +1,506 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfwnmsac.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfwnmsac.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vfwnmsac_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfwnmsac.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfwnmsac.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfwnmsac.vv v8, v10, v11 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfwnmsac.vv v8, v10, v11, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfwnmsac.vv v8, v12, v14 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfwnmsac.vv v8, v12, v14, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfwnmsac.vv v8, v16, v20 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, 
%2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfwnmsac.vv v8, v16, v20, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv1f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.nxv1f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_mask_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv2f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 +; CHECK-NEXT: fsrm 
a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.nxv2f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_mask_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv4f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.nxv4f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_mask_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv8f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define 
@intrinsic_vfwnmsac_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.nxv8f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_mask_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv16f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv16f32_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.nxv16f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_mask_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, 
mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll new file mode 100644 index 0000000000000..4c509faab1bab --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll @@ -0,0 +1,519 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwsub_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwsub.vv v10, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwsub.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, 
iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwsub_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwsub.vv v10, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwsub.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwsub_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v9 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwsub.vv v8, v11, v10 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) 
nounwind { +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwsub.vv v8, v10, v11, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwsub_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmv2r.v v12, v10 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwsub.vv v8, v14, v12 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwsub.vv v8, v12, v14, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwsub_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: 
vmv4r.v v20, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwsub.vv v8, v20, v16 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwsub.vv v8, v16, v20, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwsub_vf_nxv1f32_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwsub.vf v9, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + 
%3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwsub_vf_nxv2f32_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwsub.vf v9, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwsub_vf_nxv4f32_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwsub.vf v8, v10, fa0 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4bf16_bf16( %0, %1, bfloat %2, %3, 
iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v10, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwsub_vf_nxv8f32_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwsub.vf v8, v12, fa0 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v12, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwsub_vf_nxv16f32_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f32_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: fsrmi a0, 0 +; 
CHECK-NEXT: vfwsub.vf v8, v16, fa0 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f32_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v16, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll new file mode 100644 index 0000000000000..f86ebf1aaf99e --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll @@ -0,0 +1,773 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwsub.wv v8, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare 
@llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwsub.wv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwsub.wv v8, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwsub.wv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; 
CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfwsub.wv v8, v8, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwsub.wv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfwsub.wv v8, v8, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwsub.wv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare 
@llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfwsub.wv v8, v8, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16( + undef, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl4re16.v v24, (a0) +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vsetvli zero, a1, e16alt, m4, ta, mu +; CHECK-NEXT: vfwsub.wv v8, v16, v24, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv1f32.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv1f32.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_bf16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv2f32.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv2f32.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv4f32.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv4f32.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare 
@llvm.riscv.vfwsub.w.mask.nxv4f32.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v10, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv8f32.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv8f32.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v12, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv16f32.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi 
a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv16f32.bf16( + undef, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v16, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwsub.wv v8, v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16( + %0, + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwsub.wv v8, v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16( + %0, + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwsub.wv v8, v8, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16( + %0, + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwsub.wv v8, v8, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16( + %0, + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwsub.wv v8, v8, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16( + %0, + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16( + %0, + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; 
CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16( + %0, + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16( + %0, + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16( + %0, + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16( + %0, + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1bf16( %0, %1, iXLen %2) nounwind { +; 
CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwsub.wv v9, v9, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16( + undef, + %1, + %0, + iXLen 0, iXLen %2) + + ret %a +} + +define @intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwsub.wv v9, v9, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16( + undef, + %1, + %0, + iXLen 0, iXLen %2) + + ret %a +} + +define @intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfwsub.wv v10, v10, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16( + undef, + %1, + %0, + iXLen 0, iXLen %2) + + ret %a +} + +define @intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfwsub.wv v12, v12, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16( + undef, + %1, + %0, + iXLen 0, iXLen %2) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll new file mode 
100644 index 0000000000000..9bd859b3452f2 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll @@ -0,0 +1,496 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vmfeq.nxv1bf16( + , + , + iXLen); + +define @intrinsic_vmfeq_vv_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vmfeq.vv v0, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.nxv1bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfeq.mask.nxv1bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfeq_mask_vv_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmfeq.vv v0, v8, v9 +; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfeq.nxv1bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfeq.mask.nxv1bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfeq.nxv2bf16( + , + , + iXLen); + +define @intrinsic_vmfeq_vv_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vmfeq.vv v0, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.nxv2bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfeq.mask.nxv2bf16( + 
, + , + , + , + iXLen); + +define @intrinsic_vmfeq_mask_vv_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmfeq.vv v0, v8, v9 +; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfeq.nxv2bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfeq.mask.nxv2bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfeq.nxv4bf16( + , + , + iXLen); + +define @intrinsic_vmfeq_vv_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmfeq.vv v0, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.nxv4bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfeq.mask.nxv4bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfeq_mask_vv_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmfeq.vv v0, v8, v9 +; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t +; CHECK-NEXT: vmv.v.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfeq.nxv4bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfeq.mask.nxv4bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfeq.nxv8bf16( + , + , + iXLen); + +define @intrinsic_vmfeq_vv_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_vv_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmfeq.vv v0, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.nxv8bf16( 
+ %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfeq.mask.nxv8bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfeq_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 +; CHECK-NEXT: vmfeq.vv v0, v8, v10 +; CHECK-NEXT: vmfeq.vv v14, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v14 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfeq.nxv8bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfeq.mask.nxv8bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfeq.nxv16bf16( + , + , + iXLen); + +define @intrinsic_vmfeq_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_vv_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmfeq.vv v0, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.nxv16bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfeq.mask.nxv16bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfeq_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 +; CHECK-NEXT: vmfeq.vv v0, v8, v12 +; CHECK-NEXT: vmfeq.vv v20, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v20 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfeq.nxv16bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfeq.mask.nxv16bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfeq.nxv1bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfeq_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, 
mf4, ta, ma +; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.nxv1bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfeq.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfeq_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfeq.nxv2bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfeq_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.nxv2bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfeq.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfeq_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfeq.nxv4bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfeq_vf_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.nxv4bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfeq.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfeq_mask_vf_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfeq.nxv8bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfeq_vf_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_vf_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.nxv8bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfeq.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfeq_mask_vf_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmfeq.vf v11, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfeq.nxv16bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfeq_vf_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_vf_nxv16bf16_bf16: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.nxv16bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfeq.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfeq_mask_vf_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vmv1r.v v13, v0 +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmfeq.vf v13, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v13 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll new file mode 100644 index 0000000000000..73946dc1a744c --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll @@ -0,0 +1,496 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vmfge.nxv1bf16( + , + , + iXLen); + +define @intrinsic_vmfge_vv_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.nxv1bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv1bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfge_mask_vv_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; 
CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfge.nxv1bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfge.mask.nxv1bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv2bf16( + , + , + iXLen); + +define @intrinsic_vmfge_vv_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.nxv2bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv2bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfge_mask_vv_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfge.nxv2bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfge.mask.nxv2bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv4bf16( + , + , + iXLen); + +define @intrinsic_vmfge_vv_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.nxv4bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv4bf16( + , + , + , + , + iXLen); + 
+define @intrinsic_vmfge_mask_vv_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t +; CHECK-NEXT: vmv.v.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfge.nxv4bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfge.mask.nxv4bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv8bf16( + , + , + iXLen); + +define @intrinsic_vmfge_vv_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.nxv8bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv8bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfge_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 +; CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: vmfle.vv v14, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v14 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfge.nxv8bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfge.mask.nxv8bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv16bf16( + , + , + iXLen); + +define @intrinsic_vmfge_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.nxv16bf16( + %0, + %1, + 
iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv16bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfge_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 +; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: vmfle.vv v20, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v20 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfge.nxv16bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfge.mask.nxv16bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv1bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfge_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vf_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.nxv1bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfge_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv2bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfge_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vf_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; 
CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.nxv2bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfge_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv4bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfge_vf_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vf_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.nxv4bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfge_mask_vf_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv8bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfge_vf_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vf_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: 
vmfge.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.nxv8bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfge_mask_vf_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmfge.vf v11, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv16bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfge_vf_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vf_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.nxv16bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfge_mask_vf_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vmv1r.v v13, v0 +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmfge.vf v13, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v13 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll new file mode 100644 index 0000000000000..fac324ca5c125 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll @@ -0,0 +1,496 @@ +; NOTE: Assertions have been 
autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vmfgt.nxv1bf16( + , + , + iXLen); + +define @intrinsic_vmfgt_vv_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.nxv1bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv1bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfgt_mask_vv_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfgt.nxv1bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfgt.mask.nxv1bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv2bf16( + , + , + iXLen); + +define @intrinsic_vmfgt_vv_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.nxv2bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv2bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfgt_mask_vv_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vmfgt_mask_vv_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfgt.nxv2bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfgt.mask.nxv2bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv4bf16( + , + , + iXLen); + +define @intrinsic_vmfgt_vv_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.nxv4bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv4bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfgt_mask_vv_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t +; CHECK-NEXT: vmv.v.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfgt.nxv4bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfgt.mask.nxv4bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv8bf16( + , + , + iXLen); + +define @intrinsic_vmfgt_vv_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmflt.vv v0, v10, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.nxv8bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv8bf16( + , + , + , + , + iXLen); + +define 
@intrinsic_vmfgt_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 +; CHECK-NEXT: vmflt.vv v0, v10, v8 +; CHECK-NEXT: vmflt.vv v14, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v14 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfgt.nxv8bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfgt.mask.nxv8bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv16bf16( + , + , + iXLen); + +define @intrinsic_vmfgt_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.nxv16bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv16bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfgt_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 +; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: vmflt.vv v20, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v20 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfgt.nxv16bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfgt.mask.nxv16bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv1bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfgt_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.nxv1bf16.bf16( 
+ %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfgt_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv2bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfgt_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.nxv2bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfgt_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv4bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfgt_vf_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vmfgt.nxv4bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfgt_mask_vf_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv8bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfgt_vf_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vf_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.nxv8bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfgt_mask_vf_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmfgt.vf v11, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv16bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfgt_vf_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vf_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; 
CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.nxv16bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfgt_mask_vf_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vmv1r.v v13, v0 +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmfgt.vf v13, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v13 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll new file mode 100644 index 0000000000000..8356b7bbd3ff7 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll @@ -0,0 +1,496 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vmfle.nxv1bf16( + , + , + iXLen); + +define @intrinsic_vmfle_vv_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfle_vv_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vmfle.vv v0, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.nxv1bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfle.mask.nxv1bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfle_mask_vv_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, 
a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmfle.vv v0, v8, v9 +; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfle.nxv1bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfle.mask.nxv1bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfle.nxv2bf16( + , + , + iXLen); + +define @intrinsic_vmfle_vv_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfle_vv_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vmfle.vv v0, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.nxv2bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfle.mask.nxv2bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfle_mask_vv_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmfle.vv v0, v8, v9 +; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfle.nxv2bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfle.mask.nxv2bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfle.nxv4bf16( + , + , + iXLen); + +define @intrinsic_vmfle_vv_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfle_vv_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmfle.vv v0, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.nxv4bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfle.mask.nxv4bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfle_mask_vv_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vmfle_mask_vv_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmfle.vv v0, v8, v9 +; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t +; CHECK-NEXT: vmv.v.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfle.nxv4bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfle.mask.nxv4bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfle.nxv8bf16( + , + , + iXLen); + +define @intrinsic_vmfle_vv_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfle_vv_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmfle.vv v0, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.nxv8bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfle.mask.nxv8bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfle_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 +; CHECK-NEXT: vmfle.vv v0, v8, v10 +; CHECK-NEXT: vmfle.vv v14, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v14 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfle.nxv8bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfle.mask.nxv8bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfle.nxv16bf16( + , + , + iXLen); + +define @intrinsic_vmfle_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfle_vv_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmfle.vv v0, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.nxv16bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfle.mask.nxv16bf16( + , + , + , + , + iXLen); + +define 
@intrinsic_vmfle_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 +; CHECK-NEXT: vmfle.vv v0, v8, v12 +; CHECK-NEXT: vmfle.vv v20, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v20 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfle.nxv16bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfle.mask.nxv16bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfle.nxv1bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfle_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfle_vf_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.nxv1bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfle.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfle_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfle.nxv2bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfle_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfle_vf_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.nxv2bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret 
%a +} + +declare @llvm.riscv.vmfle.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfle_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfle.nxv4bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfle_vf_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfle_vf_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.nxv4bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfle.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfle_mask_vf_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfle.nxv8bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfle_vf_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfle_vf_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.nxv8bf16.bf16( + %0, + bfloat 
%1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfle.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfle_mask_vf_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmfle.vf v11, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfle.nxv16bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfle_vf_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfle_vf_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.nxv16bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfle.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfle_mask_vf_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vmv1r.v v13, v0 +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmfle.vf v13, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v13 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll new file mode 100644 index 0000000000000..2e1bcc5e87bfc --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll @@ -0,0 +1,496 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 
-mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vmflt.nxv1bf16( + , + , + iXLen); + +define @intrinsic_vmflt_vv_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmflt_vv_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vmflt.vv v0, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmflt.nxv1bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmflt.mask.nxv1bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmflt_mask_vv_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmflt.vv v0, v8, v9 +; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmflt.nxv1bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmflt.mask.nxv1bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmflt.nxv2bf16( + , + , + iXLen); + +define @intrinsic_vmflt_vv_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmflt_vv_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vmflt.vv v0, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmflt.nxv2bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmflt.mask.nxv2bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmflt_mask_vv_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, 
mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmflt.vv v0, v8, v9 +; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmflt.nxv2bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmflt.mask.nxv2bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmflt.nxv4bf16( + , + , + iXLen); + +define @intrinsic_vmflt_vv_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmflt_vv_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmflt.vv v0, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmflt.nxv4bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmflt.mask.nxv4bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmflt_mask_vv_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmflt.vv v0, v8, v9 +; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t +; CHECK-NEXT: vmv.v.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmflt.nxv4bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmflt.mask.nxv4bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmflt.nxv8bf16( + , + , + iXLen); + +define @intrinsic_vmflt_vv_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmflt_vv_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmflt.vv v0, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmflt.nxv8bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmflt.mask.nxv8bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmflt_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vmflt_mask_vv_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 +; CHECK-NEXT: vmflt.vv v0, v8, v10 +; CHECK-NEXT: vmflt.vv v14, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v14 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmflt.nxv8bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmflt.mask.nxv8bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmflt.nxv16bf16( + , + , + iXLen); + +define @intrinsic_vmflt_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmflt_vv_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmflt.vv v0, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmflt.nxv16bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmflt.mask.nxv16bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmflt_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 +; CHECK-NEXT: vmflt.vv v0, v8, v12 +; CHECK-NEXT: vmflt.vv v20, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v20 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmflt.nxv16bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmflt.mask.nxv16bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmflt.nxv1bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmflt_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmflt_vf_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmflt.nxv1bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmflt.mask.nxv1bf16.bf16( + , 
+ , + bfloat, + , + iXLen); + +define @intrinsic_vmflt_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmflt.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmflt.nxv2bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmflt_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmflt_vf_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmflt.nxv2bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmflt.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmflt_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmflt.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmflt.nxv4bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmflt_vf_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmflt_vf_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmflt.nxv4bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare 
@llvm.riscv.vmflt.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmflt_mask_vf_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmflt.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmflt.nxv8bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmflt_vf_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmflt_vf_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmflt.nxv8bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmflt.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmflt_mask_vf_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmflt.vf v11, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmflt.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmflt.nxv16bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmflt_vf_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmflt_vf_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmflt.nxv16bf16.bf16( + %0, + bfloat %1, + iXLen 
%2) + + ret %a +} + +declare @llvm.riscv.vmflt.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmflt_mask_vf_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vmv1r.v v13, v0 +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmflt.vf v13, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v13 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmflt.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll new file mode 100644 index 0000000000000..283ffc500fdde --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll @@ -0,0 +1,496 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vmfne.nxv1bf16( + , + , + iXLen); + +define @intrinsic_vmfne_vv_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfne_vv_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfne.nxv1bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfne.mask.nxv1bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfne_mask_vv_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmfne.vv v0, v8, v9 +; 
CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfne.nxv1bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfne.mask.nxv1bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfne.nxv2bf16( + , + , + iXLen); + +define @intrinsic_vmfne_vv_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfne_vv_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfne.nxv2bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfne.mask.nxv2bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfne_mask_vv_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmfne.vv v0, v8, v9 +; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfne.nxv2bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfne.mask.nxv2bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfne.nxv4bf16( + , + , + iXLen); + +define @intrinsic_vmfne_vv_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfne_vv_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfne.nxv4bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfne.mask.nxv4bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfne_mask_vv_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, 
e16alt, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmfne.vv v0, v8, v9 +; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t +; CHECK-NEXT: vmv.v.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfne.nxv4bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfne.mask.nxv4bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfne.nxv8bf16( + , + , + iXLen); + +define @intrinsic_vmfne_vv_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfne_vv_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfne.nxv8bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfne.mask.nxv8bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfne_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 +; CHECK-NEXT: vmfne.vv v0, v8, v10 +; CHECK-NEXT: vmfne.vv v14, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v14 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfne.nxv8bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfne.mask.nxv8bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfne.nxv16bf16( + , + , + iXLen); + +define @intrinsic_vmfne_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfne_vv_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmfne.vv v0, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfne.nxv16bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfne.mask.nxv16bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfne_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vmfne_mask_vv_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 +; CHECK-NEXT: vmfne.vv v0, v8, v12 +; CHECK-NEXT: vmfne.vv v20, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v20 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfne.nxv16bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfne.mask.nxv16bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfne.nxv1bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfne_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfne_vf_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfne.nxv1bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfne.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfne_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfne.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfne.nxv2bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfne_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfne_vf_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfne.nxv2bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfne.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen); + +define 
@intrinsic_vmfne_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfne.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfne.nxv4bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfne_vf_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfne_vf_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfne.nxv4bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfne.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfne_mask_vf_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfne.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfne.nxv8bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfne_vf_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfne_vf_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfne.nxv8bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfne.mask.nxv8bf16.bf16( + , + , + bfloat, 
+ , + iXLen); + +define @intrinsic_vmfne_mask_vf_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmfne.vf v11, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfne.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfne.nxv16bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfne_vf_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfne_vf_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfne.nxv16bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfne.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfne_mask_vf_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vmv1r.v v13, v0 +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmfne.vf v13, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v13 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfne.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + From 55f49c540df4b881659742f63d047f679861b66a Mon Sep 17 00:00:00 2001 From: Brandon Wu Date: Sun, 28 Sep 2025 18:28:05 -0700 Subject: [PATCH 02/11] [RISCV][clang] Support Zvfbfa C intrinsics --- clang/include/clang/Basic/riscv_vector.td | 83 +- .../clang/Basic/riscv_vector_common.td | 66 +- clang/lib/Sema/SemaRISCV.cpp | 3 +- clang/lib/Support/RISCVVIntrinsicUtils.cpp | 5 + .../non-policy/non-overloaded/vfadd-bf16.c | 249 ++ 
.../non-policy/non-overloaded/vfclass-bf16.c | 134 ++ .../non-policy/non-overloaded/vfmacc-bf16.c | 249 ++ .../non-policy/non-overloaded/vfmadd-bf16.c | 249 ++ .../non-policy/non-overloaded/vfmax-bf16.c | 249 ++ .../non-policy/non-overloaded/vfmerge-bf16.c | 69 + .../non-policy/non-overloaded/vfmin-bf16.c | 249 ++ .../non-policy/non-overloaded/vfmsac-bf16.c | 249 ++ .../non-policy/non-overloaded/vfmsub-bf16.c | 249 ++ .../non-policy/non-overloaded/vfmul-bf16.c | 249 ++ .../non-policy/non-overloaded/vfmv-bf16.c | 189 ++ .../non-policy/non-overloaded/vfncvt-bf16.c | 724 ++++++ .../non-overloaded/vfncvt_rod-bf16.c | 113 + .../non-overloaded/vfncvt_rtz-bf16.c | 267 +++ .../non-policy/non-overloaded/vfnmacc-bf16.c | 249 ++ .../non-policy/non-overloaded/vfnmadd-bf16.c | 249 ++ .../non-policy/non-overloaded/vfnmsac-bf16.c | 249 ++ .../non-policy/non-overloaded/vfnmsub-bf16.c | 249 ++ .../non-policy/non-overloaded/vfrec7-bf16.c | 129 ++ .../non-policy/non-overloaded/vfrsqrt7-bf16.c | 129 ++ .../non-policy/non-overloaded/vfrsub-bf16.c | 129 ++ .../non-policy/non-overloaded/vfsgnj-bf16.c | 249 ++ .../non-policy/non-overloaded/vfsgnjn-bf16.c | 249 ++ .../non-policy/non-overloaded/vfsgnjx-bf16.c | 249 ++ .../non-overloaded/vfslide1down-bf16.c | 129 ++ .../non-overloaded/vfslide1up-bf16.c | 129 ++ .../non-policy/non-overloaded/vfsub-bf16.c | 249 ++ .../non-policy/non-overloaded/vfwadd-bf16.c | 899 ++++++++ .../non-policy/non-overloaded/vfwcvt-bf16.c | 366 +++ .../non-policy/non-overloaded/vfwmacc-bf16.c | 486 ++++ .../non-policy/non-overloaded/vfwmsac-bf16.c | 486 ++++ .../non-policy/non-overloaded/vfwmul-bf16.c | 455 ++++ .../non-policy/non-overloaded/vfwnmacc-bf16.c | 494 ++++ .../non-policy/non-overloaded/vfwnmsac-bf16.c | 494 ++++ .../non-policy/non-overloaded/vfwsub-bf16.c | 899 ++++++++ .../non-policy/non-overloaded/vmfeq-bf16.c | 249 ++ .../non-policy/non-overloaded/vmfge-bf16.c | 249 ++ .../non-policy/non-overloaded/vmfgt-bf16.c | 249 ++ 
.../non-policy/non-overloaded/vmfle-bf16.c | 249 ++ .../non-policy/non-overloaded/vmflt-bf16.c | 249 ++ .../non-policy/non-overloaded/vmfne-bf16.c | 249 ++ .../non-policy/overloaded/vfadd-bf16.c | 249 ++ .../non-policy/overloaded/vfclass-bf16.c | 134 ++ .../non-policy/overloaded/vfmacc-bf16.c | 249 ++ .../non-policy/overloaded/vfmadd-bf16.c | 249 ++ .../non-policy/overloaded/vfmax-bf16.c | 249 ++ .../non-policy/overloaded/vfmerge-bf16.c | 69 + .../non-policy/overloaded/vfmin-bf16.c | 249 ++ .../non-policy/overloaded/vfmsac-bf16.c | 249 ++ .../non-policy/overloaded/vfmsub-bf16.c | 249 ++ .../non-policy/overloaded/vfmul-bf16.c | 249 ++ .../non-policy/overloaded/vfmv-bf16.c | 69 + .../non-policy/overloaded/vfncvt-bf16.c | 724 ++++++ .../non-policy/overloaded/vfncvt_rod-bf16.c | 113 + .../non-policy/overloaded/vfncvt_rtz-bf16.c | 267 +++ .../non-policy/overloaded/vfnmacc-bf16.c | 249 ++ .../non-policy/overloaded/vfnmadd-bf16.c | 249 ++ .../non-policy/overloaded/vfnmsac-bf16.c | 249 ++ .../non-policy/overloaded/vfnmsub-bf16.c | 249 ++ .../non-policy/overloaded/vfrec7-bf16.c | 129 ++ .../non-policy/overloaded/vfrsqrt7-bf16.c | 129 ++ .../non-policy/overloaded/vfrsub-bf16.c | 129 ++ .../non-policy/overloaded/vfsgnj-bf16.c | 249 ++ .../non-policy/overloaded/vfsgnjn-bf16.c | 249 ++ .../non-policy/overloaded/vfsgnjx-bf16.c | 249 ++ .../non-policy/overloaded/vfslide1down-bf16.c | 129 ++ .../non-policy/overloaded/vfslide1up-bf16.c | 129 ++ .../non-policy/overloaded/vfsub-bf16.c | 249 ++ .../non-policy/overloaded/vfwadd-bf16.c | 893 ++++++++ .../non-policy/overloaded/vfwcvt-bf16.c | 366 +++ .../non-policy/overloaded/vfwmacc-bf16.c | 474 ++++ .../non-policy/overloaded/vfwmsac-bf16.c | 474 ++++ .../non-policy/overloaded/vfwmul-bf16.c | 451 ++++ .../non-policy/overloaded/vfwnmacc-bf16.c | 480 ++++ .../non-policy/overloaded/vfwnmsac-bf16.c | 480 ++++ .../non-policy/overloaded/vfwsub-bf16.c | 893 ++++++++ .../non-policy/overloaded/vmfeq-bf16.c | 249 ++ 
.../non-policy/overloaded/vmfge-bf16.c | 249 ++ .../non-policy/overloaded/vmfgt-bf16.c | 249 ++ .../non-policy/overloaded/vmfle-bf16.c | 249 ++ .../non-policy/overloaded/vmflt-bf16.c | 249 ++ .../non-policy/overloaded/vmfne-bf16.c | 249 ++ .../policy/non-overloaded/vfadd-bf16.c | 489 ++++ .../policy/non-overloaded/vfclass-bf16.c | 272 +++ .../policy/non-overloaded/vfmacc-bf16.c | 489 ++++ .../policy/non-overloaded/vfmadd-bf16.c | 489 ++++ .../policy/non-overloaded/vfmax-bf16.c | 489 ++++ .../policy/non-overloaded/vfmerge-bf16.c | 69 + .../policy/non-overloaded/vfmin-bf16.c | 489 ++++ .../policy/non-overloaded/vfmsac-bf16.c | 489 ++++ .../policy/non-overloaded/vfmsub-bf16.c | 489 ++++ .../policy/non-overloaded/vfmul-bf16.c | 489 ++++ .../policy/non-overloaded/vfmv-bf16.c | 129 ++ .../policy/non-overloaded/vfncvt-bf16.c | 1577 +++++++++++++ .../policy/non-overloaded/vfncvt_rod-bf16.c | 233 ++ .../policy/non-overloaded/vfncvt_rtz-bf16.c | 572 +++++ .../policy/non-overloaded/vfnmacc-bf16.c | 489 ++++ .../policy/non-overloaded/vfnmadd-bf16.c | 489 ++++ .../policy/non-overloaded/vfnmsac-bf16.c | 489 ++++ .../policy/non-overloaded/vfnmsub-bf16.c | 489 ++++ .../policy/non-overloaded/vfrec7-bf16.c | 249 ++ .../policy/non-overloaded/vfrsqrt7-bf16.c | 249 ++ .../policy/non-overloaded/vfrsub-bf16.c | 249 ++ .../policy/non-overloaded/vfsgnj-bf16.c | 489 ++++ .../policy/non-overloaded/vfsgnjn-bf16.c | 489 ++++ .../policy/non-overloaded/vfsgnjx-bf16.c | 489 ++++ .../policy/non-overloaded/vfslide1down-bf16.c | 249 ++ .../policy/non-overloaded/vfslide1up-bf16.c | 249 ++ .../policy/non-overloaded/vfsub-bf16.c | 489 ++++ .../policy/non-overloaded/vfwadd-bf16.c | 2007 +++++++++++++++++ .../policy/non-overloaded/vfwcvt-bf16.c | 765 +++++++ .../policy/non-overloaded/vfwmacc-bf16.c | 1017 +++++++++ .../policy/non-overloaded/vfwmsac-bf16.c | 1017 +++++++++ .../policy/non-overloaded/vfwmul-bf16.c | 1015 +++++++++ .../policy/non-overloaded/vfwnmacc-bf16.c | 1034 +++++++++ 
.../policy/non-overloaded/vfwnmsac-bf16.c | 1034 +++++++++ .../policy/non-overloaded/vfwsub-bf16.c | 2007 +++++++++++++++++ .../policy/non-overloaded/vmfeq-bf16.c | 129 ++ .../policy/non-overloaded/vmfge-bf16.c | 129 ++ .../policy/non-overloaded/vmfgt-bf16.c | 129 ++ .../policy/non-overloaded/vmfle-bf16.c | 129 ++ .../policy/non-overloaded/vmflt-bf16.c | 129 ++ .../policy/non-overloaded/vmfne-bf16.c | 129 ++ .../policy/overloaded/vfadd-bf16.c | 489 ++++ .../policy/overloaded/vfclass-bf16.c | 272 +++ .../policy/overloaded/vfmacc-bf16.c | 489 ++++ .../policy/overloaded/vfmadd-bf16.c | 489 ++++ .../policy/overloaded/vfmax-bf16.c | 489 ++++ .../policy/overloaded/vfmerge-bf16.c | 69 + .../policy/overloaded/vfmin-bf16.c | 489 ++++ .../policy/overloaded/vfmsac-bf16.c | 489 ++++ .../policy/overloaded/vfmsub-bf16.c | 489 ++++ .../policy/overloaded/vfmul-bf16.c | 489 ++++ .../policy/overloaded/vfmv-bf16.c | 129 ++ .../policy/overloaded/vfncvt-bf16.c | 1539 +++++++++++++ .../policy/overloaded/vfncvt_rod-bf16.c | 233 ++ .../policy/overloaded/vfncvt_rtz-bf16.c | 572 +++++ .../policy/overloaded/vfnmacc-bf16.c | 489 ++++ .../policy/overloaded/vfnmadd-bf16.c | 489 ++++ .../policy/overloaded/vfnmsac-bf16.c | 489 ++++ .../policy/overloaded/vfnmsub-bf16.c | 489 ++++ .../policy/overloaded/vfrec7-bf16.c | 249 ++ .../policy/overloaded/vfrsqrt7-bf16.c | 249 ++ .../policy/overloaded/vfrsub-bf16.c | 249 ++ .../policy/overloaded/vfsgnj-bf16.c | 489 ++++ .../policy/overloaded/vfsgnjn-bf16.c | 489 ++++ .../policy/overloaded/vfsgnjx-bf16.c | 489 ++++ .../policy/overloaded/vfslide1down-bf16.c | 249 ++ .../policy/overloaded/vfslide1up-bf16.c | 249 ++ .../policy/overloaded/vfsub-bf16.c | 489 ++++ .../policy/overloaded/vfwadd-bf16.c | 1932 ++++++++++++++++ .../policy/overloaded/vfwcvt-bf16.c | 765 +++++++ .../policy/overloaded/vfwmacc-bf16.c | 977 ++++++++ .../policy/overloaded/vfwmsac-bf16.c | 977 ++++++++ .../policy/overloaded/vfwmul-bf16.c | 975 ++++++++ .../policy/overloaded/vfwnmacc-bf16.c | 
994 ++++++++ .../policy/overloaded/vfwnmsac-bf16.c | 994 ++++++++ .../policy/overloaded/vfwsub-bf16.c | 1932 ++++++++++++++++ .../policy/overloaded/vmfeq-bf16.c | 129 ++ .../policy/overloaded/vmfge-bf16.c | 129 ++ .../policy/overloaded/vmfgt-bf16.c | 129 ++ .../policy/overloaded/vmfle-bf16.c | 129 ++ .../policy/overloaded/vmflt-bf16.c | 129 ++ .../policy/overloaded/vmfne-bf16.c | 129 ++ 168 files changed, 69139 insertions(+), 20 deletions(-) create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfclass-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmax-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmerge-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmin-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmul-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmv-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt_rod-bf16.c create mode 100644 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt_rtz-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmadd-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsac-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsub-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrec7-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsqrt7-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsub-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnj-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjn-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjx-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1down-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1up-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsub-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwadd-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwcvt-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmacc-bf16.c create mode 100644 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmsac-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmul-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmacc-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmsac-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwsub-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfeq-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfge-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfgt-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfle-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmflt-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfne-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfclass-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmacc-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmadd-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmax-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmerge-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmin-bf16.c create mode 
100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsac-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsub-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmul-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmv-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt_rod-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt_rtz-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmacc-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmadd-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsac-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsub-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrec7-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsqrt7-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsub-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnj-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjn-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjx-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1down-bf16.c create mode 100644 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1up-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsub-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwadd-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmacc-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmsac-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmul-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmacc-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmsac-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwsub-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfeq-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfge-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfgt-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfle-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmflt-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfne-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfclass-bf16.c create mode 100644 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmacc-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmadd-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmax-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmerge-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmin-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsac-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsub-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmul-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmv-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt_rod-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt_rtz-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmacc-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmadd-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsac-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsub-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrec7-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrsqrt7-bf16.c create mode 100644 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrsub-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnj-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnjn-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnjx-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfslide1down-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfslide1up-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsub-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwadd-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwcvt-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmacc-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmsac-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmul-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmacc-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmsac-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwsub-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfeq-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfge-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfgt-bf16.c create mode 100644 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfle-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmflt-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfne-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfclass-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmacc-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmadd-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmax-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmerge-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmin-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsac-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsub-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmul-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmv-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt_rod-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt_rtz-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmacc-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmadd-bf16.c create 
mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsac-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsub-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrec7-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsqrt7-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsub-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnj-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjn-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjx-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1down-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1up-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsub-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwadd-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwcvt-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmacc-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmsac-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmul-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmacc-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmsac-bf16.c create mode 100644 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwsub-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfeq-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfge-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfgt-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfle-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmflt-bf16.c create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfne-bf16.c diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td index 07a8724b6f33d..96d8300a0faf3 100644 --- a/clang/include/clang/Basic/riscv_vector.td +++ b/clang/include/clang/Basic/riscv_vector.td @@ -1013,9 +1013,9 @@ let ManualCodegen = [{ }] in { let HasFRMRoundModeOp = true in { // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions - defm vfadd : RVVFloatingBinBuiltinSetRoundingMode; - defm vfsub : RVVFloatingBinBuiltinSetRoundingMode; - defm vfrsub : RVVFloatingBinVFBuiltinSetRoundingMode; + defm vfadd : RVVFloatingBinBuiltinSetRoundingMode; + defm vfsub : RVVFloatingBinBuiltinSetRoundingMode; + defm vfrsub : RVVFloatingBinVFBuiltinSetRoundingMode; // 13.3. Vector Widening Floating-Point Add/Subtract Instructions // Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW @@ -1023,14 +1023,14 @@ let ManualCodegen = [{ defm vfwsub : RVVFloatingWidenOp0BinBuiltinSetRoundingMode; // 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions - defm vfmul : RVVFloatingBinBuiltinSetRoundingMode; + defm vfmul : RVVFloatingBinBuiltinSetRoundingMode; defm vfdiv : RVVFloatingBinBuiltinSetRoundingMode; defm vfrdiv : RVVFloatingBinVFBuiltinSetRoundingMode; } // 13.2. 
Vector Single-Width Floating-Point Add/Subtract Instructions - defm vfadd : RVVFloatingBinBuiltinSet; - defm vfsub : RVVFloatingBinBuiltinSet; - defm vfrsub : RVVFloatingBinVFBuiltinSet; + defm vfadd : RVVFloatingBinBuiltinSet; + defm vfsub : RVVFloatingBinBuiltinSet; + defm vfrsub : RVVFloatingBinVFBuiltinSet; // 13.3. Vector Widening Floating-Point Add/Subtract Instructions // Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW @@ -1038,7 +1038,7 @@ let ManualCodegen = [{ defm vfwsub : RVVFloatingWidenOp0BinBuiltinSet; // 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions - defm vfmul : RVVFloatingBinBuiltinSet; + defm vfmul : RVVFloatingBinBuiltinSet; defm vfdiv : RVVFloatingBinBuiltinSet; defm vfrdiv : RVVFloatingBinVFBuiltinSet; } @@ -1065,6 +1065,10 @@ let ManualCodegen = [{ defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "x", [["vv", "w", "wvvu"], ["vf", "w", "wveu"]]>; + let RequiredFeatures = ["zvfbfa"] in + defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "y", + [["vv", "vw", "wvvu"], + ["vf", "vw", "wveu"]]>; } } // 13.3. Vector Widening Floating-Point Add/Subtract Instructions @@ -1081,6 +1085,10 @@ let ManualCodegen = [{ defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "x", [["vv", "w", "wvv"], ["vf", "w", "wve"]]>; + let RequiredFeatures = ["zvfbfa"] in + defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "y", + [["vv", "vw", "wvv"], + ["vf", "vw", "wve"]]>; } } } @@ -1170,6 +1178,8 @@ let ManualCodegen = [{ defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "fd", [["v", "v", "vvu"]]>; let RequiredFeatures = ["zvfh"] in defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "x", [["v", "v", "vvu"]]>; + let RequiredFeatures = ["zvfbfa"] in + defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "y", [["v", "v", "vvu"]]>; } // 13.8. 
Vector Floating-Point Square-Root Instruction defm vfsqrt : RVVOutBuiltinSet<"vfsqrt", "fd", [["v", "v", "vv"]]>; @@ -1180,21 +1190,26 @@ let ManualCodegen = [{ defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "fd", [["v", "v", "vv"]]>; let RequiredFeatures = ["zvfh"] in defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "x", [["v", "v", "vv"]]>; + let RequiredFeatures = ["zvfbfa"] in + defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "y", [["v", "v", "vv"]]>; } // 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction defm vfrsqrt7 : RVVOutBuiltinSet<"vfrsqrt7", "fd", [["v", "v", "vv"]]>; let RequiredFeatures = ["zvfh"] in defm vfrsqrt7 : RVVOutBuiltinSet<"vfrsqrt7", "x", [["v", "v", "vv"]]>; +let RequiredFeatures = ["zvfbfa"] in + defm vfrsqrt7 : RVVOutBuiltinSet<"vfrsqrt7", "y", [["v", "v", "vv"]]>; + // 13.11. Vector Floating-Point MIN/MAX Instructions -defm vfmin : RVVFloatingBinBuiltinSet; -defm vfmax : RVVFloatingBinBuiltinSet; +defm vfmin : RVVFloatingBinBuiltinSet; +defm vfmax : RVVFloatingBinBuiltinSet; // 13.12. Vector Floating-Point Sign-Injection Instructions -defm vfsgnj : RVVFloatingBinBuiltinSet; -defm vfsgnjn : RVVFloatingBinBuiltinSet; -defm vfsgnjx : RVVFloatingBinBuiltinSet; +defm vfsgnj : RVVFloatingBinBuiltinSet; +defm vfsgnjn : RVVFloatingBinBuiltinSet; +defm vfsgnjx : RVVFloatingBinBuiltinSet; } defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "fd">; let RequiredFeatures = ["zvfh"] in @@ -1219,6 +1234,8 @@ let UnMaskedPolicyScheme = HasPassthruOperand in { defm vfclass : RVVOp0BuiltinSet<"vfclass", "fd", [["v", "Uv", "Uvv"]]>; let RequiredFeatures = ["zvfh"] in defm vfclass : RVVOp0BuiltinSet<"vfclass", "x", [["v", "Uv", "Uvv"]]>; +let RequiredFeatures = ["zvfbfa"] in + defm vfclass : RVVOp0BuiltinSet<"vfclass", "y", [["v", "vUv", "Uvv"]]>; } // 13.15. 
Vector Floating-Point Merge Instruction @@ -1239,6 +1256,9 @@ let HasMasked = false, let RequiredFeatures = ["zvfh"] in defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "x", [["vfm", "v", "vvem"]]>; + let RequiredFeatures = ["zvfbfa"] in + defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "y", + [["vfm", "v", "vvem"]]>; } // 13.16. Vector Floating-Point Move Instruction @@ -1252,6 +1272,9 @@ let HasMasked = false, let RequiredFeatures = ["zvfh"] in defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "x", [["f", "v", "ve"]]>; + let RequiredFeatures = ["zvfbfa"] in + defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "y", + [["f", "v", "ve"]]>; } // 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions @@ -1287,10 +1310,16 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in { defm : RVVConvBuiltinSet<"vfwcvt_f_x_v", "c", [["Fw", "Fwv"]]>; } } + let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfwcvt_f_bf16" in { + defm : RVVConvBuiltinSet<"vfwcvt_f_xu_v", "c", [["Yw", "YwUv"]]>; + defm : RVVConvBuiltinSet<"vfwcvt_f_x_v", "c", [["Yw", "Ywv"]]>; + } let OverloadedName = "vfwcvt_f" in { defm : RVVConvBuiltinSet<"vfwcvt_f_f_v", "f", [["w", "wv"]]>; let RequiredFeatures = ["zvfhmin"] in defm : RVVConvBuiltinSet<"vfwcvt_f_f_v", "x", [["w", "wv"]]>; + let RequiredFeatures = ["zvfbfa"] in + defm : RVVConvBuiltinSet<"vfwcvt_f_f_v", "y", [["vw", "wv"]]>; } } @@ -1300,17 +1329,23 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in { defm : RVVConvBuiltinSet<"vfncvt_rtz_xu_f_w", "si", [["Uv", "UvFw"]]>; let RequiredFeatures = ["zvfh"] in defm : RVVConvBuiltinSet<"vfncvt_rtz_xu_f_w", "c", [["Uv", "UvFw"]]>; + let RequiredFeatures = ["zvfbfa"] in + defm : RVVConvBuiltinSet<"vfncvt_rtz_xu_f_w", "c", [["YwUv", "UvYw"]]>; } let OverloadedName = "vfncvt_rtz_x" in { defm : RVVConvBuiltinSet<"vfncvt_rtz_x_f_w", "si", [["Iv", "IvFw"]]>; let RequiredFeatures = ["zvfh"] in defm : RVVConvBuiltinSet<"vfncvt_rtz_x_f_w", "c", [["Iv", "IvFw"]]>; + let RequiredFeatures = ["zvfbfa"] in + defm : 
RVVConvBuiltinSet<"vfncvt_rtz_x_f_w", "c", [["YwIv", "IvYw"]]>; } let OverloadedName = "vfncvt_rod_f" in { defm : RVVConvBuiltinSet<"vfncvt_rod_f_f_w", "f", [["v", "vw"]]>; let RequiredFeatures = ["zvfh"] in defm : RVVConvBuiltinSet<"vfncvt_rod_f_f_w", "x", [["v", "vw"]]>; } + let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfncvt_rod_f_bf16" in + defm : RVVConvBuiltinSet<"vfncvt_rod_f_f_w", "y", [["v", "vw"]]>; } // Zvfbfmin - Vector convert BF16 to FP32 @@ -1363,11 +1398,15 @@ let ManualCodegen = [{ defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "si", [["Iv", "IvFwu"]]>; let RequiredFeatures = ["zvfh"] in defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["Iv", "IvFwu"]]>; + let RequiredFeatures = ["zvfbfa"] in + defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["YwIv", "IvYwu"]]>; } let OverloadedName = "vfncvt_xu" in { defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "si", [["Uv", "UvFwu"]]>; let RequiredFeatures = ["zvfh"] in defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["Uv", "UvFwu"]]>; + let RequiredFeatures = ["zvfbfa"] in + defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["YwUv", "UvYwu"]]>; } let OverloadedName = "vfncvt_f" in { defm : RVVConvBuiltinSet<"vfncvt_f_x_w", "f", [["v", "vIwu"]]>; @@ -1382,6 +1421,8 @@ let ManualCodegen = [{ let RequiredFeatures = ["zvfhmin"] in defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "x", [["v", "vwu"]]>; } + let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfncvt_f_bf16" in + defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "y", [["v", "vwu"]]>; } // Zvfbfmin - Vector convert FP32 to BF16 @@ -1430,11 +1471,15 @@ let ManualCodegen = [{ defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "si", [["Iv", "IvFw"]]>; let RequiredFeatures = ["zvfh"] in defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["Iv", "IvFw"]]>; + let RequiredFeatures = ["zvfbfa"] in + defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["YwIv", "IvYw"]]>; } let OverloadedName = "vfncvt_xu" in { defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "si", [["Uv", "UvFw"]]>; let RequiredFeatures = 
["zvfh"] in defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["Uv", "UvFw"]]>; + let RequiredFeatures = ["zvfbfa"] in + defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["YwUv", "UvYw"]]>; } let OverloadedName = "vfncvt_f" in { defm : RVVConvBuiltinSet<"vfncvt_f_x_w", "f", [["v", "vIw"]]>; @@ -1449,6 +1494,8 @@ let ManualCodegen = [{ let RequiredFeatures = ["zvfhmin"] in defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "x", [["v", "vw"]]>; } + let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfncvt_f_bf16" in + defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "y", [["v", "vw"]]>; } // Zvfbfmin - Vector convert FP32 to BF16 @@ -1578,6 +1625,9 @@ let HasMasked = false, MaskedPolicyScheme = NonePolicy in { let RequiredFeatures = ["zvfh"] in defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "x", [["s", "ve", "ev"]]>; + let RequiredFeatures = ["zvfbfa"] in + defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "y", + [["s", "ve", "ev"]]>; } let OverloadedName = "vfmv_s", UnMaskedPolicyScheme = HasPassthruOperand, @@ -1589,6 +1639,9 @@ let HasMasked = false, MaskedPolicyScheme = NonePolicy in { defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "x", [["f", "v", "ve"], ["x", "Uv", "UvUe"]]>; + let RequiredFeatures = ["zvfbfa"] in + defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "y", + [["f", "v", "ve"]]>; } } @@ -1601,11 +1654,11 @@ defm vslidedown : RVVSlideDownBuiltinSet; // 16.3.3. Vector Slide1up Instructions let UnMaskedPolicyScheme = HasPassthruOperand in { defm vslide1up : RVVSlideOneBuiltinSet; -defm vfslide1up : RVVFloatingBinVFBuiltinSet; +defm vfslide1up : RVVFloatingBinVFBuiltinSet; // 16.3.4. Vector Slide1down Instruction defm vslide1down : RVVSlideOneBuiltinSet; -defm vfslide1down : RVVFloatingBinVFBuiltinSet; +defm vfslide1down : RVVFloatingBinVFBuiltinSet; // 16.4. 
Vector Register Gather Instructions // signed and floating type diff --git a/clang/include/clang/Basic/riscv_vector_common.td b/clang/include/clang/Basic/riscv_vector_common.td index 767bcee7b1596..2a2a04998366a 100644 --- a/clang/include/clang/Basic/riscv_vector_common.td +++ b/clang/include/clang/Basic/riscv_vector_common.td @@ -470,6 +470,10 @@ let HasMaskedOffOperand = false in { defm "" : RVVOutOp1BuiltinSet; + let RequiredFeatures = ["zvfbfa"] in + defm "" : RVVOutOp1BuiltinSet; } multiclass RVVFloatingTerBuiltinSetRoundingMode { defm "" : RVVOutOp1BuiltinSet; + let RequiredFeatures = ["zvfbfa"] in + defm "" : RVVOutOp1BuiltinSet; } } @@ -491,6 +499,10 @@ let HasMaskedOffOperand = false, Log2LMUL = [-2, -1, 0, 1, 2] in { defm "" : RVVOutOp1Op2BuiltinSet; + let RequiredFeatures = ["zvfbfa"] in + defm "" : RVVOutOp1Op2BuiltinSet; } multiclass RVVFloatingWidenTerBuiltinSetRoundingMode { defm "" : RVVOutOp1Op2BuiltinSet; + let RequiredFeatures = ["zvfbfa"] in + defm "" : RVVOutOp1Op2BuiltinSet; } } -multiclass RVVFloatingBinBuiltinSet { +multiclass RVVFloatingBinBuiltinSet { defm "" : RVVOutOp1BuiltinSet; @@ -511,9 +527,15 @@ multiclass RVVFloatingBinBuiltinSet { defm "" : RVVOutOp1BuiltinSet; + if HasBF then { + let RequiredFeatures = ["zvfbfa"] in + defm "" : RVVOutOp1BuiltinSet; + } } -multiclass RVVFloatingBinBuiltinSetRoundingMode { +multiclass RVVFloatingBinBuiltinSetRoundingMode { defm "" : RVVOutOp1BuiltinSet; @@ -521,22 +543,38 @@ multiclass RVVFloatingBinBuiltinSetRoundingMode { defm "" : RVVOutOp1BuiltinSet; + if HasBF then { + let RequiredFeatures = ["zvfbfa"] in + defm "" : RVVOutOp1BuiltinSet; + } } -multiclass RVVFloatingBinVFBuiltinSet { +multiclass RVVFloatingBinVFBuiltinSet { defm "" : RVVOutOp1BuiltinSet; let RequiredFeatures = ["zvfh"] in defm "" : RVVOutOp1BuiltinSet; + if HasBF then { + let RequiredFeatures = ["zvfbfa"] in + defm "" : RVVOutOp1BuiltinSet; + } } -multiclass RVVFloatingBinVFBuiltinSetRoundingMode { +multiclass 
RVVFloatingBinVFBuiltinSetRoundingMode { defm "" : RVVOutOp1BuiltinSet; let RequiredFeatures = ["zvfh"] in defm "" : RVVOutOp1BuiltinSet; + if HasBF then { + let RequiredFeatures = ["zvfbfa"] in + defm "" : RVVOutOp1BuiltinSet; + } } multiclass RVVFloatingMaskOutBuiltinSet { @@ -547,6 +585,10 @@ multiclass RVVFloatingMaskOutBuiltinSet { defm "" : RVVOp0Op1BuiltinSet; + let RequiredFeatures = ["zvfbfa"] in + defm "" : RVVOp0Op1BuiltinSet; } multiclass RVVFloatingMaskOutVFBuiltinSet @@ -748,6 +790,10 @@ multiclass RVVFloatingWidenBinBuiltinSet { defm "" : RVVWidenBuiltinSet; + let RequiredFeatures = ["zvfbfa"] in + defm "" : RVVWidenBuiltinSet; } multiclass RVVFloatingWidenBinBuiltinSetRoundingMode { @@ -758,6 +804,10 @@ multiclass RVVFloatingWidenBinBuiltinSetRoundingMode { defm "" : RVVWidenBuiltinSet; + let RequiredFeatures = ["zvfbfa"] in + defm "" : RVVWidenBuiltinSet; } multiclass RVVFloatingWidenOp0BinBuiltinSet { @@ -768,6 +818,10 @@ multiclass RVVFloatingWidenOp0BinBuiltinSet { defm "" : RVVWidenWOp0BuiltinSet; + let RequiredFeatures = ["zvfbfa"] in + defm "" : RVVWidenWOp0BuiltinSet; } multiclass RVVFloatingWidenOp0BinBuiltinSetRoundingMode { @@ -778,4 +832,8 @@ multiclass RVVFloatingWidenOp0BinBuiltinSetRoundingMode { defm "" : RVVWidenWOp0BuiltinSet; + let RequiredFeatures = ["zvfbfa"] in + defm "" : RVVWidenWOp0BuiltinSet; } diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp index 3ba93ff98898b..c5ef0d535628d 100644 --- a/clang/lib/Sema/SemaRISCV.cpp +++ b/clang/lib/Sema/SemaRISCV.cpp @@ -1464,7 +1464,8 @@ void SemaRISCV::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D, } else if (Info.ElementType->isBFloat16Type() && !FeatureMap.lookup("zvfbfmin") && - !FeatureMap.lookup("xandesvbfhcvt")) + !FeatureMap.lookup("xandesvbfhcvt") && + !FeatureMap.lookup("experimental-zvfbfa")) if (DeclareAndesVectorBuiltins) { Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zvfbfmin or xandesvbfhcvt"; diff --git 
a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp index 5a4e805d4a9d1..dad3d0dae423a 100644 --- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp +++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp @@ -654,6 +654,9 @@ PrototypeDescriptor::parsePrototypeDescriptor( case 'F': TM |= TypeModifier::Float; break; + case 'Y': + TM |= TypeModifier::BFloat; + break; case 'S': TM |= TypeModifier::LMUL1; break; @@ -704,6 +707,8 @@ void RVVType::applyModifier(const PrototypeDescriptor &Transformer) { ElementBitwidth *= 2; LMUL.MulLog2LMUL(1); Scale = LMUL.getScale(ElementBitwidth); + if (ScalarType == ScalarTypeKind::BFloat) + ScalarType = ScalarTypeKind::Float; break; case VectorTypeModifier::Widening4XVector: ElementBitwidth *= 4; diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd-bf16.c new file mode 100644 index 0000000000000..d7734e05a8aaa --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return 
__riscv_vfadd_vv_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return 
__riscv_vfadd_vv_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m4(op1, 
op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + 
return __riscv_vfadd_vv_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m2_m(mask, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], 
i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m8_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfclass-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfclass-bf16.c new file mode 100644 index 0000000000000..68814f4672d05 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfclass-bf16.c @@ -0,0 +1,134 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf4_u16mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16mf4_u16mf4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf2_u16mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16mf2_u16mf2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m1_u16m1( +// 
CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfclass_v_bf16m1_u16m1(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m1_u16m1(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m2_u16m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfclass_v_bf16m2_u16m2(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m2_u16m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m4_u16m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfclass_v_bf16m4_u16m4(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m4_u16m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m8_u16m8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv32bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfclass_v_bf16m8_u16m8(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m8_u16m8(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf4_u16mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1bf16.i64( poison, [[VS2]], 
[[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfclass_v_bf16mf4_u16mf4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf2_u16mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfclass_v_bf16mf2_u16mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m1_u16m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfclass_v_bf16m1_u16m1_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfclass_v_bf16m1_u16m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m2_u16m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfclass_v_bf16m2_u16m2_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfclass_v_bf16m2_u16m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m4_u16m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfclass.mask.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfclass_v_bf16m4_u16m4_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfclass_v_bf16m4_u16m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m8_u16m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfclass_v_bf16m8_u16m8_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfclass_v_bf16m8_u16m8_m(vm, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc-bf16.c new file mode 100644 index 0000000000000..616455d5f3f9e --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t 
vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m1(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4( +// CHECK-RV64-SAME: 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, 
size_t vl) { + return __riscv_vfmacc_vf_bf16m8(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef 
[[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m1_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m1_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], 
i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return 
__riscv_vfmacc_vf_bf16m4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m8_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m8_m(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd-bf16.c new file mode 100644 index 0000000000000..eec662a3671c8 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck 
--check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16mf4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16mf4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 
7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16mf2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m1(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfmadd_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m8(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16mf4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16mf4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16mf2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16mf2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m1_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t 
test_vfmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m1_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfmadd_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m8_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m8_m(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmax-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmax-bf16.c new file mode 100644 index 
0000000000000..dfdeb4e967a46 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmax-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf2(op1, 
op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfmax_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], 
bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return 
__riscv_vfmax_vv_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64( 
poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8_m( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m8_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmerge-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmerge-bf16.c new file mode 100644 index 0000000000000..96221c5385dd9 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmerge-bf16.c @@ -0,0 +1,69 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmerge_vfm_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) { + return __riscv_vfmerge_vfm_bf16mf4(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmerge_vfm_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) { + return __riscv_vfmerge_vfm_bf16mf2(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmerge_vfm_bf16m1(vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) { + return __riscv_vfmerge_vfm_bf16m1(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmerge_vfm_bf16m2(vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) { + return __riscv_vfmerge_vfm_bf16m2(op1, op2, mask, vl); +} + 
+// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmerge_vfm_bf16m4(vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) { + return __riscv_vfmerge_vfm_bf16m4(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmerge_vfm_bf16m8(vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) { + return __riscv_vfmerge_vfm_bf16m8(op1, op2, mask, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmin-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmin-bf16.c new file mode 100644 index 0000000000000..8f8d82ba21bc5 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmin-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfmin_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m8_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac-bf16.c new file mode 100644 index 0000000000000..f4644dfb8d7e7 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16mf4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1bf16.bf16.i64( [[VD]], bfloat 
[[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16mf4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16mf2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m1(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m4(vd, vs1, 
vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m8(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16mf4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16mf4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16mf2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return 
__riscv_vfmsac_vf_bf16mf2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m1_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m1_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m8_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m8_m(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub-bf16.c new file mode 100644 index 0000000000000..07053afa3355c --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t 
test_vfmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16mf4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16mf4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16mf2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m1(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfmsub_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t 
test_vfmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m8(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16mf4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16mf4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16mf2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfmsub_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16mf2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m1_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m1_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4_m(vbool4_t mask, 
vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m8_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m8_m(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmul-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmul-bf16.c new file mode 100644 index 0000000000000..88fb329934365 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmul-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | 
opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2(vbfloat16mf2_t 
op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + 
return __riscv_vfmul_vf_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return 
__riscv_vfmul_vf_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfmul_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) 
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+  return __riscv_vfmul_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfmul_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmv-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmv-bf16.c
new file mode 100644
index 0000000000000..d80ec3df1bbaa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmv-bf16.c
@@ -0,0 +1,189 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_v_f_bf16mf4(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16.i64(<vscale x 1 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_v_f_bf16mf4(__bf16 src, size_t vl) {
+  return __riscv_vfmv_v_f_bf16mf4(src, vl);
+}
+
+//
CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16mf2( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv2bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmv_v_f_bf16mf2(__bf16 src, size_t vl) { + return __riscv_vfmv_v_f_bf16mf2(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16m1( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv4bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmv_v_f_bf16m1(__bf16 src, size_t vl) { + return __riscv_vfmv_v_f_bf16m1(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16m2( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv8bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmv_v_f_bf16m2(__bf16 src, size_t vl) { + return __riscv_vfmv_v_f_bf16m2(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16m4( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv16bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmv_v_f_bf16m4(__bf16 src, size_t vl) { + return __riscv_vfmv_v_f_bf16m4(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16m8( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmv.v.f.nxv32bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmv_v_f_bf16m8(__bf16 src, size_t vl) { + return __riscv_vfmv_v_f_bf16m8(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf4_bf16( +// CHECK-RV64-SAME: [[SRC:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv1bf16( [[SRC]]) +// CHECK-RV64-NEXT: ret bfloat [[TMP0]] +// +__bf16 test_vfmv_f_s_bf16mf4_bf16(vbfloat16mf4_t src) { + return __riscv_vfmv_f_s_bf16mf4_bf16(src); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16mf4( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmv_s_f_bf16mf4(__bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16mf4(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf2_bf16( +// CHECK-RV64-SAME: [[SRC:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv2bf16( [[SRC]]) +// CHECK-RV64-NEXT: ret bfloat [[TMP0]] +// +__bf16 test_vfmv_f_s_bf16mf2_bf16(vbfloat16mf2_t src) { + return __riscv_vfmv_f_s_bf16mf2_bf16(src); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16mf2( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmv_s_f_bf16mf2(__bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16mf2(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m1_bf16( +// CHECK-RV64-SAME: 
[[SRC:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv4bf16( [[SRC]]) +// CHECK-RV64-NEXT: ret bfloat [[TMP0]] +// +__bf16 test_vfmv_f_s_bf16m1_bf16(vbfloat16m1_t src) { + return __riscv_vfmv_f_s_bf16m1_bf16(src); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m1( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmv_s_f_bf16m1(__bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16m1(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m2_bf16( +// CHECK-RV64-SAME: [[SRC:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv8bf16( [[SRC]]) +// CHECK-RV64-NEXT: ret bfloat [[TMP0]] +// +__bf16 test_vfmv_f_s_bf16m2_bf16(vbfloat16m2_t src) { + return __riscv_vfmv_f_s_bf16m2_bf16(src); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m2( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmv_s_f_bf16m2(__bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16m2(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m4_bf16( +// CHECK-RV64-SAME: [[SRC:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv16bf16( [[SRC]]) +// CHECK-RV64-NEXT: ret bfloat [[TMP0]] +// +__bf16 test_vfmv_f_s_bf16m4_bf16(vbfloat16m4_t src) { + return __riscv_vfmv_f_s_bf16m4_bf16(src); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfmv_s_f_bf16m4( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv16bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmv_s_f_bf16m4(__bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16m4(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m8_bf16( +// CHECK-RV64-SAME: [[SRC:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv32bf16( [[SRC]]) +// CHECK-RV64-NEXT: ret bfloat [[TMP0]] +// +__bf16 test_vfmv_f_s_bf16m8_bf16(vbfloat16m8_t src) { + return __riscv_vfmv_f_s_bf16m8_bf16(src); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m8( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv32bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmv_s_f_bf16m8(__bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16m8(src, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt-bf16.c new file mode 100644 index 0000000000000..a5afab9bec1ec --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt-bf16.c @@ -0,0 +1,724 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck 
--check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_bf16m1_i8mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_bf16m2_i8m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8>
@test_vfncvt_x_f_w_bf16m4_i8m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t 
test_vfncvt_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( poison, 
[[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m2_i8m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2, + size_t 
vl) { + return __riscv_vfncvt_f_f_w_bf16m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t 
test_vfncvt_x_f_w_bf16mf2_i8mf4_rm(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t 
test_vfncvt_x_f_w_bf16m8_i8m4_rm(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t 
test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(vbool32_t vm, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(vbool32_t vm, + vbfloat16mf2_t vs2, + size_t vl) { + 
return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(vbool16_t vm, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_m(vbool32_t vm, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfncvt_f_f_w_bf16m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt_rod-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt_rod-bf16.c new file mode 100644 index 0000000000000..70c377bba1dfb --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt_rod-bf16.c @@ -0,0 +1,113 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m1(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64( poison, [[VS2]], 
i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m4_m(vm, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt_rtz-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt_rtz-bf16.c new file mode 100644 index 0000000000000..854e9868109e4 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt_rtz-bf16.c @@ -0,0 +1,267 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t 
test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) { + 
return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(vs2, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(vm, vs2, vl); 
+} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(vm, vs2, vl); +} diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc-bf16.c new file mode 100644 index 0000000000000..18484883a14f4 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16mf4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16mf4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16mf2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return 
__riscv_vfnmacc_vf_bf16m1(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, 
i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m8(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16mf4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4_m( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16mf4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16mf2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16mf2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m1_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m1_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t 
test_vfnmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m8_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfnmacc_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m8_m(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmadd-bf16.c new file mode 100644 index 0000000000000..e519e5acb4575 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmadd-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16mf4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat 
noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t 
vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m1(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64( [[VD]], 
[[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m8(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16mf4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16mf2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m1_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m1_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2_m(vbool8_t mask, 
vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8_m( 
+// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m8_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m8_m(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsac-bf16.c new file mode 100644 index 0000000000000..47e1f44f5a45f --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsac-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4( +// CHECK-RV64-SAME: 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2(vbfloat16mf2_t 
vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m1(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsac.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8( +// 
CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m8(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], 
[[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m1_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, 
vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m1_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], 
bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m8_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m8_m(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsub-bf16.c new file mode 100644 index 0000000000000..4b55b64542c61 --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsub-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t 
test_vfnmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m1(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfnmsub_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m8(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], 
[[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + 
return __riscv_vfnmsub_vv_bf16m1_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m1_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m8_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], 
[[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m8_m(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrec7-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrec7-bf16.c new file mode 100644 index 0000000000000..1ffee73e91d04 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrec7-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1bf16.i64( poison, [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrec7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16mf4(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2bf16.i64( poison, [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrec7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16mf2(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m1( +// CHECK-RV64-SAME: 
[[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4bf16.i64( poison, [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrec7_v_bf16m1(vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m1(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8bf16.i64( poison, [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrec7_v_bf16m2(vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m2(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16bf16.i64( poison, [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrec7_v_bf16m4(vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m4(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv32bf16.i64( poison, [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrec7_v_bf16m8(vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m8(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1bf16.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vbfloat16mf4_t test_vfrec7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16mf4_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2bf16.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrec7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16mf2_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4bf16.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrec7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m1_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8bf16.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrec7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m2_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16bf16.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16m4_t test_vfrec7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m4_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32bf16.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrec7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m8_m(mask, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsqrt7-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsqrt7-bf16.c new file mode 100644 index 0000000000000..964c4869622aa --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsqrt7-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1bf16.i64( poison, [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16mf4(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2bf16.i64( poison, [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16mf2(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4bf16.i64( poison, [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsqrt7_v_bf16m1(vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m1(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8bf16.i64( poison, [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsqrt7_v_bf16m2(vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m2(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16bf16.i64( poison, [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsqrt7_v_bf16m4(vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m4(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv32bf16.i64( poison, [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsqrt7_v_bf16m8(vbfloat16m8_t op1, size_t vl) { + return 
__riscv_vfrsqrt7_v_bf16m8(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64( poison, [[OP1]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16mf4_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64( poison, [[OP1]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16mf2_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64( poison, [[OP1]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsqrt7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m1_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64( poison, [[OP1]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsqrt7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) { + return 
__riscv_vfrsqrt7_v_bf16m2_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64( poison, [[OP1]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsqrt7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m4_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64( poison, [[OP1]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsqrt7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m8_m(mask, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsub-bf16.c new file mode 100644 index 0000000000000..c7c3869e7b77c --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsub-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf2_m(mask, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64( poison, 
[[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m8_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnj-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnj-bf16.c new file mode 100644 index 0000000000000..778b8b83e9841 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnj-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return 
__riscv_vfsgnj_vf_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m1(op1, 
op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfsgnj_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, 
size_t vl) { + return __riscv_vfsgnj_vf_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfsgnj_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m8_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjn-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjn-bf16.c new file mode 100644 index 0000000000000..7de308978e1d3 --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjn-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf2(op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfsgnjn_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfsgnjn_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t 
test_vfsgnjn_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, 
size_t vl) { + return __riscv_vfsgnjn_vf_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m8_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjx-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjx-bf16.c new file mode 100644 index 0000000000000..5fa285cc78b63 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjx-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define 
dso_local @test_vfsgnjx_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfsgnjx_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfsgnjx_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfsgnjx_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], 
i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat 
noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8_m(vbool2_t mask, 
vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m8_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1down-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1down-bf16.c new file mode 100644 index 0000000000000..b94d26b4ddf40 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1down-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf4( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1down_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16mf4(src, value, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf2( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1down_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16mf2(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m1( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1down_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m1(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m2( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1down_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m2(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m4( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t 
test_vfslide1down_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m4(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m8( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1down_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m8(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16mf4_m(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16mf2_m(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef 
[[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1down_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m1_m(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1down_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m2_m(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1down_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m4_m(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 
[[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1down_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m8_m(mask, src, value, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1up-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1up-bf16.c new file mode 100644 index 0000000000000..06e8b49af19d0 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1up-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf4( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1up_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16mf4(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf2( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1up_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, 
size_t vl) { + return __riscv_vfslide1up_vf_bf16mf2(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m1( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1up_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m1(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m2( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1up_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m2(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m4( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1up_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m4(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m8( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vbfloat16m8_t test_vfslide1up_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m8(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16mf4_m(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16mf2_m(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1up_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m1_m(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m2_m( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1up_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m2_m(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1up_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m4_m(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1up_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m8_m(mask, src, value, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsub-bf16.c new file mode 100644 index 0000000000000..2423b0bbdbb80 --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsub-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16mf2(op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfsub_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfsub_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t 
test_vfsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + 
return __riscv_vfsub_vf_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m8_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwadd-bf16.c new file mode 100644 index 0000000000000..24d34f46f4203 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwadd-bf16.c @@ -0,0 +1,899 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwadd_vv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1, + 
size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m1_f32m2(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64( poison, 
[[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwadd_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_m( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 
[[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16m1_f32m2_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwadd_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], 
[[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m1_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); 
+} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + 
vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwadd_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16m1_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t 
test_vfwadd_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfwadd_wf_bf16_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwcvt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwcvt-bf16.c new file mode 100644 index 0000000000000..fb3e0031af98c --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwcvt-bf16.c @@ -0,0 +1,366 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4(vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2(vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1(vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m1(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2(vint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4(vint8m2_t vs2, size_t vl) { + return 
__riscv_vfwcvt_f_x_v_bf16m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8(vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m8(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4(vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2(vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1(vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m1(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2(vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4(vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8(vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m8(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64( poison, [[VS2]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m1_f32m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m2_f32m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m4_f32m8(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_m(vbool64_t vm, 
vint8mf8_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_m(vbool32_t vm, vint8mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_m(vbool16_t vm, vint8mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_m(vbool8_t vm, vint8m1_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t 
test_vfwcvt_f_x_v_bf16m4_m(vbool4_t vm, vint8m2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_m(vbool2_t vm, vint8m4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m8_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_m(vbool64_t vm, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_m(vbool32_t vm, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_m(vbool16_t vm, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_m(vbool8_t vm, vuint8m1_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_m(vbool4_t vm, vuint8m2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_m(vbool2_t vm, vuint8m4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m8_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64( 
poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_m(vbool64_t vm, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_m(vm, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmacc-bf16.c new file mode 100644 index 0000000000000..be09003320386 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmacc-bf16.c @@ -0,0 +1,486 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return 
__riscv_vfwmacc_vv_bf16m1_f32m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return 
__riscv_vfwmacc_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_m( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t 
test_vfwmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmsac-bf16.c new file mode 100644 index 0000000000000..749081333c2b3 --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmsac-bf16.c @@ -0,0 +1,486 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], 
i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmsac_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], 
i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, 
+ vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfwmsac_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 
3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_rm(vd, 
vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], 
[[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmul-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmul-bf16.c new file mode 100644 index 0000000000000..6783ba43b0570 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmul-bf16.c @@ -0,0 +1,455 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], 
[[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4(vs2, rs1, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], 
i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmul_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm(vs2, 
rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat 
[[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat 
noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], 
[[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, 
+ __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm_m( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmacc-bf16.c new file mode 100644 index 0000000000000..6127a94c919d9 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmacc-bf16.c @@ -0,0 +1,494 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: 
[[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m2_f32m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwnmacc_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, 
vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwnmacc_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t 
test_vfwnmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(vm, 
vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2, + 
__RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmsac-bf16.c new file mode 100644 index 0000000000000..f37dd310d944d --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmsac-bf16.c @@ -0,0 +1,494 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwnmsac_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat 
[[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m2_f32m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfwnmsac_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return 
__riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat 
noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], 
[[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_m(vbool16_t 
vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, 
vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwsub-bf16.c new file mode 100644 index 0000000000000..510ff9193389e --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwsub-bf16.c @@ -0,0 +1,899 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 
-target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef 
[[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwsub_wf_bf16_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return 
__riscv_vfwsub_wv_bf16m2_f32m4(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t 
test_vfwsub_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t 
test_vfwsub_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat 
noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], 
[[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return 
__riscv_vfwsub_wv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64( poison, 
[[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], 
[[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return 
__riscv_vfwsub_vf_bf16m4_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_rm_m(vm, vs2, rs1, 
__RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return 
__riscv_vfwsub_vf_bf16m4_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfeq-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfeq-bf16.c new file mode 100644 index 0000000000000..669d0427b569a --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfeq-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck 
--check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16mf2_b32(op1, op2, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vmfeq_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vf_bf16mf2_b32_m(vbool32_t 
mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16m8_b2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vmfeq_vf_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m8_b2_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfge-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfge-bf16.c new file mode 100644 index 0000000000000..b169efd51a0b4 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfge-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfge.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 
[[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64( 
poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, 
vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m8_b2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m8_b2_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfgt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfgt-bf16.c new file mode 100644 index 0000000000000..9aea7d24b0edc --- 
/dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfgt-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16mf2_b32(op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m2_b8( 
+// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16mf2_b32_m(mask, op1, 
op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 
[[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m8_b2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m8_b2_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfle-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfle-bf16.c new file mode 100644 index 0000000000000..40f0c27f5b37a --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfle-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1bf16.nxv1bf16.i64( 
[[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t 
test_vmfle_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t 
test_vmfle_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m2_b8_m(mask, 
op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m8_b2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 
[[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m8_b2_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmflt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmflt-bf16.c new file mode 100644 index 0000000000000..f64eee3effdaf --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmflt-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vmflt_vv_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m2_b8( +// CHECK-RV64-SAME: 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf2_b32_m( +// 
CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t 
test_vmflt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m8_b2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m8_b2_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfne-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfne-bf16.c new file mode 100644 index 0000000000000..809ea5628e394 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfne-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 
-target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t 
test_vmfne_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + 
return __riscv_vmfne_vf_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vmfne_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t 
test_vmfne_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m8_b2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m8_b2_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd-bf16.c new file mode 100644 index 0000000000000..9d6b071c2768c --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfadd_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], 
bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return 
__riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 
7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfclass-bf16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfclass-bf16.c new file mode 100644 index 0000000000000..2760f85a45d3c --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfclass-bf16.c @@ -0,0 +1,134 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf4_u16mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf2_u16mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m1_u16m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfclass_v_bf16m1_u16m1(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfclass_v_bf16m2_u16m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfclass_v_bf16m2_u16m2(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m4_u16m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfclass_v_bf16m4_u16m4(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m8_u16m8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv32bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfclass_v_bf16m8_u16m8(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf4_u16mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfclass(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf2_u16mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfclass.mask.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfclass(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m1_u16m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfclass_v_bf16m1_u16m1_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfclass(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m2_u16m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfclass_v_bf16m2_u16m2_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfclass(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m4_u16m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfclass_v_bf16m4_u16m4_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfclass(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m8_u16m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32bf16.i64( poison, 
[[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfclass_v_bf16m8_u16m8_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfclass(vm, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmacc-bf16.c new file mode 100644 index 0000000000000..ae3f1f24eb762 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmacc-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return 
__riscv_vfmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmacc.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4_m( +// 
CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64( 
[[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return 
__riscv_vfmacc(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmadd-bf16.c new file mode 100644 index 0000000000000..db2184c6e1dba --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmadd-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 
7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m4( +// CHECK-RV64-SAME: 
[[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, 
vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vbfloat16m2_t test_vfmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m8_m( +// 
CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmax-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmax-bf16.c new file mode 100644 index 0000000000000..66497bfb15934 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmax-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmax.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4(vbfloat16m4_t op1, __bf16 
op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4_m(vbool64_t 
mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmerge-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmerge-bf16.c new file mode 100644 index 0000000000000..1dc290bf1a222 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmerge-bf16.c @@ -0,0 +1,69 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 
+// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmerge_vfm_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) { + return __riscv_vfmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmerge_vfm_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) { + return __riscv_vfmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmerge_vfm_bf16m1(vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) { + return __riscv_vfmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m2( +// CHECK-RV64-SAME: 
[[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmerge_vfm_bf16m2(vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) { + return __riscv_vfmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmerge_vfm_bf16m4(vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) { + return __riscv_vfmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmerge_vfm_bf16m8(vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) { + return __riscv_vfmerge(op1, op2, mask, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmin-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmin-bf16.c new file mode 100644 index 0000000000000..1564d1195ecef --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmin-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 
+// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2bf16.bf16.i64( 
poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t 
test_vfmin_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(op1, 
op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8_m( 
+// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsac-bf16.c new file mode 100644 index 0000000000000..0384e7da29e08 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsac-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1( +// CHECK-RV64-SAME: 
[[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return 
__riscv_vfmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, 
size_t vl) { + return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsub-bf16.c new file mode 100644 index 0000000000000..306f189b54c89 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsub-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfmsub_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t 
vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 
3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t 
test_vfmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 
[[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmul-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmul-bf16.c new file mode 100644 index 0000000000000..fffd83a12d36c --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmul-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4(vbfloat16mf4_t op1, 
vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul(op1, 
op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfmul_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmul(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfmul_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16m1_t test_vfmul_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul(mask, op1, op2, vl); +} + diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmv-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmv-bf16.c new file mode 100644 index 0000000000000..f85378ff0f37a --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmv-bf16.c @@ -0,0 +1,69 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf4_bf16( +// CHECK-RV64-SAME: [[SRC:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv1bf16( [[SRC]]) +// CHECK-RV64-NEXT: ret bfloat [[TMP0]] +// +__bf16 test_vfmv_f_s_bf16mf4_bf16(vbfloat16mf4_t src) { + return __riscv_vfmv_f(src); +} + +// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf2_bf16( +// CHECK-RV64-SAME: [[SRC:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv2bf16( [[SRC]]) +// CHECK-RV64-NEXT: ret bfloat [[TMP0]] +// +__bf16 test_vfmv_f_s_bf16mf2_bf16(vbfloat16mf2_t src) { + return __riscv_vfmv_f(src); +} + +// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m1_bf16( +// CHECK-RV64-SAME: [[SRC:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv4bf16( [[SRC]]) +// CHECK-RV64-NEXT: ret bfloat [[TMP0]] +// +__bf16 test_vfmv_f_s_bf16m1_bf16(vbfloat16m1_t src) { + return __riscv_vfmv_f(src); +} + +// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m2_bf16( +// CHECK-RV64-SAME: 
[[SRC:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv8bf16( [[SRC]]) +// CHECK-RV64-NEXT: ret bfloat [[TMP0]] +// +__bf16 test_vfmv_f_s_bf16m2_bf16(vbfloat16m2_t src) { + return __riscv_vfmv_f(src); +} + +// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m4_bf16( +// CHECK-RV64-SAME: [[SRC:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv16bf16( [[SRC]]) +// CHECK-RV64-NEXT: ret bfloat [[TMP0]] +// +__bf16 test_vfmv_f_s_bf16m4_bf16(vbfloat16m4_t src) { + return __riscv_vfmv_f(src); +} + +// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m8_bf16( +// CHECK-RV64-SAME: [[SRC:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv32bf16( [[SRC]]) +// CHECK-RV64-NEXT: ret bfloat [[TMP0]] +// +__bf16 test_vfmv_f_s_bf16m8_bf16(vbfloat16m8_t src) { + return __riscv_vfmv_f(src); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt-bf16.c new file mode 100644 index 0000000000000..fb635d61670c2 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt-bf16.c @@ -0,0 +1,724 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t 
test_vfncvt_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfncvt_xu_f_w_bf16m2_u8m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t 
test_vfncvt_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 
0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t 
test_vfncvt_f_f_w_bf16m2_rm(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(vbool32_t vm, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 
[[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(vbool32_t vm, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(vbool16_t vm, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t 
test_vfncvt_f_f_w_bf16mf2_rm_m(vbool32_t vm, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt_rod-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt_rod-bf16.c new file mode 100644 index 0000000000000..1ad856df48c4b --- 
/dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt_rod-bf16.c @@ -0,0 +1,113 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2( +// CHECK-RV64-SAME: 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt_rtz-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt_rtz-bf16.c new file mode 100644 index 0000000000000..12d08934bd49d --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt_rtz-bf16.c @@ -0,0 +1,267 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature 
+v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, 
size_t vl) { + return __riscv_vfncvt_rtz_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m( 
+// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm, + vbfloat16mf2_t vs2, + size_t vl) { + return 
__riscv_vfncvt_rtz_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmacc-bf16.c new file mode 100644 index 0000000000000..6f7928b763230 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmacc-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, rs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t 
test_vfnmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmacc.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfnmacc_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 
rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmadd-bf16.c new file mode 100644 index 0000000000000..97d207040e5b2 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmadd-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmadd.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef 
[[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, vs1, 
vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, 
vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsac-bf16.c new file mode 100644 index 0000000000000..404b4f83c4f1f --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsac-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, vs1, vs2, vl); +} + 
+// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t 
test_vfnmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat 
[[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, 
vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsub-bf16.c new file mode 100644 index 0000000000000..3a520dd9af9bf --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsub-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1( +// CHECK-RV64-SAME: 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return 
__riscv_vfnmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfnmsub_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 
rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrec7-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrec7-bf16.c new file mode 100644 index 0000000000000..462b6acf8eee6 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrec7-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck 
--check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1bf16.i64( poison, [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrec7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrec7(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2bf16.i64( poison, [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrec7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrec7(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4bf16.i64( poison, [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrec7_v_bf16m1(vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrec7(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8bf16.i64( poison, [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrec7_v_bf16m2(vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrec7(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16bf16.i64( 
poison, [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrec7_v_bf16m4(vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrec7(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv32bf16.i64( poison, [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrec7_v_bf16m8(vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrec7(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1bf16.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrec7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrec7(mask, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2bf16.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrec7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrec7(mask, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4bf16.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrec7_v_bf16m1_m(vbool16_t mask, 
vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrec7(mask, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8bf16.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrec7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrec7(mask, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16bf16.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrec7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrec7(mask, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32bf16.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrec7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrec7(mask, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsqrt7-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsqrt7-bf16.c new file mode 100644 index 0000000000000..051fde7ef5472 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsqrt7-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py 
UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1bf16.i64( poison, [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrsqrt7(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2bf16.i64( poison, [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrsqrt7(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4bf16.i64( poison, [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsqrt7_v_bf16m1(vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrsqrt7(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8bf16.i64( poison, [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsqrt7_v_bf16m2(vbfloat16m2_t op1, size_t vl) { + return 
__riscv_vfrsqrt7(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16bf16.i64( poison, [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsqrt7_v_bf16m4(vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrsqrt7(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv32bf16.i64( poison, [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsqrt7_v_bf16m8(vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrsqrt7(op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64( poison, [[OP1]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrsqrt7(mask, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64( poison, [[OP1]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrsqrt7(mask, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64( poison, [[OP1]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsqrt7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrsqrt7(mask, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64( poison, [[OP1]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsqrt7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrsqrt7(mask, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64( poison, [[OP1]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsqrt7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrsqrt7(mask, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64( poison, [[OP1]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsqrt7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrsqrt7(mask, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsub-bf16.c 
new file mode 100644 index 0000000000000..04941823d1917 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsub-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, 
size_t vl) { + return __riscv_vfrsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4_m(vbool64_t mask, 
vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnj-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnj-bf16.c new file mode 100644 index 0000000000000..615deddf4254f --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnj-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t 
test_vfsgnj_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t 
vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfsgnj_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfsgnj_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t 
test_vfsgnj_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjn-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjn-bf16.c new file mode 100644 index 0000000000000..a895e5f6ad6bd --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjn-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], 
[[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfsgnjn_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjx-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjx-bf16.c new file mode 100644 index 0000000000000..0187516d00dc1 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjx-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + 
+#include + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfsgnjx_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + 
return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat 
[[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1down-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1down-bf16.c new file mode 100644 index 0000000000000..4a7689492de92 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1down-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf4( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1down_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf2( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfslide1down.nxv2bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1down_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m1( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1down_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m2( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1down_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m4( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1down_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m8( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1down_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1down_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, 
__bf16 value, size_t vl) { + return __riscv_vfslide1down(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1down_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1down_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1down_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(mask, src, value, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1up-bf16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1up-bf16.c new file mode 100644 index 0000000000000..f9f2dc0cf26b8 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1up-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf4( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1up_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf2( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1up_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m1( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64( poison, 
[[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1up_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m2( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1up_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m4( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1up_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m8( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1up_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1up_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1up_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) { + return 
__riscv_vfslide1up(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1up_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1up_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(mask, src, value, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsub-bf16.c new file mode 100644 index 0000000000000..ebcf6fa4669a3 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsub-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local 
@test_vfsub_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m1( +// CHECK-RV64-SAME: 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwadd-bf16.c new file mode 100644 index 0000000000000..124e7fb6de342 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwadd-bf16.c @@ -0,0 +1,893 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], 
bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], 
bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t 
test_vfwadd_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, + __bf16 
rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], 
[[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl); 
+} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t 
test_vfwadd_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t 
test_vfwadd_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm_m( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( poison, [[VS2]], bfloat 
[[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt-bf16.c new file mode 100644 index 0000000000000..0399a639898a0 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt-bf16.c @@ -0,0 +1,366 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4(vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2(vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1(vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2(vint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4(vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8(vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4(vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2(vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1(vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2(vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4(vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8(vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2( +// 
CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_m(vbool64_t vm, vint8mf8_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_m(vbool32_t vm, vint8mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_m(vbool16_t vm, vint8mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_m(vbool8_t vm, vint8m1_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_m(vbool4_t vm, vint8m2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_m(vbool2_t vm, vint8m4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_m(vbool64_t vm, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_m(vbool32_t vm, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_m(vbool16_t vm, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_m(vbool8_t vm, vuint8m1_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_m(vbool4_t vm, vuint8m2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_m(vbool2_t vm, vuint8m4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_m(vbool64_t vm, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmacc-bf16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmacc-bf16.c new file mode 100644 index 0000000000000..2eb7fc8cdeea8 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmacc-bf16.c @@ -0,0 +1,474 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, vl); 
+} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, 
i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, 
vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, 
i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( 
[[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); 
+} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: 
[[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], 
[[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmsac-bf16.c new file mode 100644 index 0000000000000..28f507619b428 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmsac-bf16.c @@ -0,0 +1,474 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return 
__riscv_vfwmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], 
i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, 
vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1, + 
vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], 
[[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t 
test_vfwmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return 
__riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmul-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmul-bf16.c new file mode 100644 index 0000000000000..8de49fa586dcf --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmul-bf16.c @@ -0,0 +1,451 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfwmul_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwmul(vs2, vs1, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwmul(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1, + size_t vl) { + 
return __riscv_vfwmul(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, 
[[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmul_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 
[[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm( 
+// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + 
vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], 
[[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, 
i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmacc-bf16.c new file mode 100644 index 0000000000000..783693106a217 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmacc-bf16.c @@ -0,0 +1,480 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: 
-target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t 
vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc(vm, vd, 
vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm( +// 
CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], 
[[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t 
vs2, size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmsac-bf16.c new file mode 100644 index 0000000000000..ca936af7140e5 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmsac-bf16.c @@ -0,0 +1,480 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t 
test_vfwnmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, 
vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwnmsac_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + 
__bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwsub-bf16.c new file mode 100644 index 0000000000000..2e22e22e4b6fc --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwsub-bf16.c @@ -0,0 +1,893 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: 
[[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwsub_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwsub_vv(vs2, 
vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return 
__riscv_vfwsub_vv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwsub_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_m(vbool32_t vm, 
vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, 
vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( 
poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: 
[[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return 
__riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return 
__riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm_m( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], 
[[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfeq-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfeq-bf16.c new file mode 100644 index 0000000000000..29881c9ba5d5e --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfeq-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf4_b64( +// 
CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16bf16.nxv16bf16.i64( [[OP1]], 
[[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t 
test_vmfeq_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfeq(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfeq(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfeq(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfeq(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfeq(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfeq(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return 
__riscv_vmfeq(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfge-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfge-bf16.c new file mode 100644 index 0000000000000..b8083c5ebb8df --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfge-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfge.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbool8_t test_vmfge_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfge(op1, op2, vl); 
+} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t 
op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], 
[[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfgt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfgt-bf16.c new file mode 100644 index 0000000000000..b8749b3295a19 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfgt-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbool64_t test_vmfgt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return 
__riscv_vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m4_b4( +// CHECK-RV64-SAME: 
[[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfgt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfgt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfgt(mask, op1, 
op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfgt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t 
test_vmfgt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfgt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfgt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfle-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfle-bf16.c new file mode 100644 index 0000000000000..724608c89e8de --- 
/dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfle-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf2_b32( 
+// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 
[[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return 
__riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmflt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmflt-bf16.c new file mode 100644 index 0000000000000..1b0b898d5053f --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmflt-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4bf16.bf16.i64( [[OP1]], bfloat 
[[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, 
size_t vl) { + return __riscv_vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmflt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, 
__bf16 op2, size_t vl) { + return __riscv_vmflt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmflt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmflt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat 
[[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmflt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmflt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmflt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfne-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfne-bf16.c new file mode 100644 index 0000000000000..672c1504d73eb --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfne-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: 
-target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t 
test_vmfne_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfne(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfne(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return 
__riscv_vmfne(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfne(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfne(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfne(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfne(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd-bf16.c new file mode 100644 index 0000000000000..6d55279ae306f --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 
[[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1_tu( +// 
CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vbfloat16m4_t test_vfadd_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], 
[[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t 
test_vfadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return 
__riscv_vfadd_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfadd_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 
[[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, 
vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat 
noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfclass-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfclass-bf16.c new file mode 100644 index 0000000000000..8e6946de398b0 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfclass-bf16.c @@ -0,0 +1,272 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf4_u16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tu(vuint16mf4_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16mf4_u16mf4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf2_u16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfclass.nxv2bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tu(vuint16mf2_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16mf2_u16mf2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m1_u16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfclass_v_bf16m1_u16m1_tu(vuint16m1_t vd, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfclass_v_bf16m1_u16m1_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m2_u16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfclass_v_bf16m2_u16m2_tu(vuint16m2_t vd, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfclass_v_bf16m2_u16m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m4_u16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfclass_v_bf16m4_u16m4_tu(vuint16m4_t vd, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfclass_v_bf16m4_u16m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m8_u16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv32bf16.i64( 
[[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfclass_v_bf16m8_u16m8_tu(vuint16m8_t vd, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfclass_v_bf16m8_u16m8_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf4_u16mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16mf4_u16mf4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf2_u16mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16mf2_u16mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m1_u16m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfclass_v_bf16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m1_u16m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m2_u16m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfclass_v_bf16m2_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m2_u16m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m4_u16m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfclass_v_bf16m4_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m4_u16m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m8_u16m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfclass_v_bf16m8_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m8_u16m8_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf4_u16mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return 
__riscv_vfclass_v_bf16mf4_u16mf4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf2_u16mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16mf2_u16mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m1_u16m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfclass_v_bf16m1_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m1_u16m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m2_u16m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfclass_v_bf16m2_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m2_u16m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m4_u16m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16bf16.i64( [[VD]], 
[[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfclass_v_bf16m4_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m4_u16m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m8_u16m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfclass_v_bf16m8_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m8_u16m8_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf4_u16mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16mf4_u16mf4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf2_u16mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16mf2_u16mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m1_u16m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], 
[[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfclass_v_bf16m1_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m1_u16m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m2_u16m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfclass_v_bf16m2_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m2_u16m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m4_u16m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfclass_v_bf16m4_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m4_u16m4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m8_u16m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfclass_v_bf16m8_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vbfloat16m8_t vs2, size_t vl) { + return 
__riscv_vfclass_v_bf16m8_u16m8_mu(vm, vd, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmacc-bf16.c new file mode 100644 index 0000000000000..2d4e481b1f8bb --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmacc-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2_tu( +// 
CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t 
test_vfmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m1_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m8_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return 
__riscv_vfmacc_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m1_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m1_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64( [[VD]], 
bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, 
size_t vl) { + return __riscv_vfmacc_vv_bf16m8_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m8_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4_tumu(vbool4_t 
mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfmacc_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m1_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m1_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t 
test_vfmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfmacc_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m8_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m8_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmadd-bf16.c new file mode 100644 index 0000000000000..511e073e10143 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmadd-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf4_tu( +// 
CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16mf4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16mf4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t 
test_vfmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m1_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfmadd_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m8_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m1_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t 
test_vfmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m1_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfmadd_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m8_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m8_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return 
__riscv_vfmadd_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64( 
[[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, 
vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m1_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m1_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m8_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64( 
[[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m8_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmax-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmax-bf16.c new file mode 100644 index 0000000000000..f3698d41aff34 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmax-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vbfloat16mf4_t test_vfmax_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, 
vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], 
[[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, 
vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat 
noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t 
op2, size_t vl) { + return __riscv_vfmax_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfmax_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], 
[[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, 
vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmerge-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmerge-bf16.c new file mode 100644 index 0000000000000..bcaf2cb8817e4 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmerge-bf16.c @@ -0,0 +1,69 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmerge_vfm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) { + return __riscv_vfmerge_vfm_bf16mf4_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmerge_vfm_bf16mf2_tu(vbfloat16mf2_t maskedoff, 
vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) { + return __riscv_vfmerge_vfm_bf16mf2_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmerge_vfm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) { + return __riscv_vfmerge_vfm_bf16m1_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmerge_vfm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) { + return __riscv_vfmerge_vfm_bf16m2_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmerge_vfm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) { + return __riscv_vfmerge_vfm_bf16m4_tu(maskedoff, op1, op2, mask, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmerge_vfm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) { + return __riscv_vfmerge_vfm_bf16m8_tu(maskedoff, op1, op2, mask, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmin-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmin-bf16.c new file mode 100644 index 0000000000000..911f8792e4436 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmin-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfmin_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t 
test_vfmin_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf2_tum(mask, 
maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, 
vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfmin_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16m1_t test_vfmin_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return 
__riscv_vfmin_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8_mu( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsac-bf16.c new file mode 100644 index 0000000000000..9575ad337de8c --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsac-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16mf4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16mf4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + 
return __riscv_vfmsac_vv_bf16m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m1_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], 
[[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m8_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m1_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m1_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2_tum(vbool8_t mask, 
vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfmsac_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m8_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m8_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m2_tumu(mask, vd, rs1, vs2, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m1_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return 
__riscv_vfmsac_vf_bf16m1_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m8_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m8_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsub-bf16.c new file mode 100644 index 0000000000000..8e382f710ab5a --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsub-bf16.c 
@@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16mf4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16mf4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + 
return __riscv_vfmsub_vv_bf16mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m1_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64( [[VD]], 
[[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m8_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m1_tum(mask, vd, vs1, 
vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m1_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m8_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m8_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return 
__riscv_vfmsub_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64( 
[[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, 
vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1_mu( +// 
CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m1_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m1_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t 
vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m8_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m8_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmul-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmul-bf16.c new file mode 100644 index 0000000000000..716f056f3e12c --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmul-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return 
__riscv_vfmul_vv_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return 
__riscv_vfmul_vf_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfmul.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, 
__bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfmul_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4_tumu(vbool4_t 
mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf4_mu(mask, 
maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1_mu( +// 
CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], 
[[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmv-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmv-bf16.c new file mode 100644 index 0000000000000..069ee6a2a5df3 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmv-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv1bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmv_v_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_v_f_bf16mf4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv2bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmv_v_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_v_f_bf16mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv4bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmv_v_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_v_f_bf16m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv8bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmv_v_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_v_f_bf16m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmv.v.f.nxv16bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmv_v_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_v_f_bf16m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv32bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmv_v_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_v_f_bf16m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmv_s_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16mf4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmv_s_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmv_s_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmv_s_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv16bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmv_s_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv32bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmv_s_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16m8_tu(maskedoff, src, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt-bf16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt-bf16.c new file mode 100644 index 0000000000000..36d4fc332499f --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt-bf16.c @@ -0,0 +1,1577 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], 
i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m2_i8m1_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_tu( +// CHECK-RV64-SAME: 
[[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tu(vbfloat16m1_t vd, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tu(vbfloat16m2_t vd, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tu(vbfloat16m4_t vd, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_tum(vm, vd, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m2_i8m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( 
[[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(vm, vd, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1_tum(vm, vd, vs2, vl); +} + 
+// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], 
[[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m2_i8m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return 
__riscv_vfncvt_xu_f_w_bf16m8_u8m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_mu(vm, vd, vs2, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m2_i8m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], 
[[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, 
i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], 
[[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + 
return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(vint8m1_t vd, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(vint8m2_t vd, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(vint8m4_t vd, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], i64 
0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu( +// CHECK-RV64-SAME: 
[[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tu(vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return 
__riscv_vfncvt_f_f_w_bf16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tu(vbfloat16m1_t vd, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tu(vbfloat16m2_t vd, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tu(vbfloat16m4_t vd, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 
[[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return 
__riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(vbool64_t vm, + vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(vm, vd, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(vbool32_t vm, + vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(vm, vd, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], 
[[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tum(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tum(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return 
__riscv_vfncvt_f_f_w_bf16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tum(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(vm, vd, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(vm, vd, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(vbool64_t vm, + vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(vm, 
vd, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(vbool32_t vm, + vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(vm, vd, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(vbool16_t vm, + vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(vm, vd, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tumu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tumu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tumu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tumu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tumu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + 
+// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t 
test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(vbool64_t vm, vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(vbool32_t vm, vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_mu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_mu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, 
size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt_rod-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt_rod-bf16.c new file mode 100644 index 0000000000000..84066846e188c --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt_rod-bf16.c @@ -0,0 +1,233 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1_tu( +// 
CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tu(vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m1_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tu(vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tu(vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tum(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return 
__riscv_vfncvt_rod_f_f_w_bf16mf4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tum(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tumu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tumu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return 
__riscv_vfncvt_rod_f_f_w_bf16m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m4_mu(vm, vd, 
vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt_rtz-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt_rtz-bf16.c new file mode 100644 index 0000000000000..4644eff12e210 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt_rtz-bf16.c @@ -0,0 +1,572 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd, + vbfloat16m2_t 
vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return 
__riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm, + vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm, + vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm, + vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t 
test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64( 
[[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm, + vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm, + vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm, + vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, + size_t vl) { + return 
__riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t 
vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm, + vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm, + vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t 
test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(vm, vd, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmacc-bf16.c new file mode 100644 index 0000000000000..93fd6ba2faff7 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmacc-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16mf4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + 
return __riscv_vfnmacc_vf_bf16mf4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmacc.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m1_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfnmacc_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m8_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) 
{ + return __riscv_vfnmacc_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m1_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m1_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat 
noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64( 
[[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m8_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m8_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tumu(vbool64_t mask, 
vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfnmacc_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return 
__riscv_vfnmacc_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m1_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m1_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat 
[[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m8_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m8_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmadd-bf16.c new file mode 100644 index 0000000000000..d7e6b8225d8c0 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmadd-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: 
riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16mf4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfnmadd_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m1_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m8_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t 
test_vfnmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m1_tum(mask, vd, vs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m1_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m8_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 
[[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m8_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t 
vl) { + return __riscv_vfnmadd_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2_tumu( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t 
test_vfnmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf2_mu(mask, vd, rs1, vs2, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m1_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m1_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m8_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m8_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsac-bf16.c new file mode 100644 index 0000000000000..e0c289d23c17f --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsac-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t 
test_vfnmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m1_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return 
__riscv_vfnmsac_vf_bf16m2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64( 
[[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m8_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t 
vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m1_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m1_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2_tum( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m8_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m8_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t 
test_vfnmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return 
__riscv_vfnmsac_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat 
noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t 
test_vfnmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m1_mu(mask, vd, vs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m1_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m8_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m8_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsub-bf16.c new file mode 100644 index 0000000000000..05ccda36e9032 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsub-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, 
__bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m1_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfnmsub_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m8_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) 
{ + return __riscv_vfnmsub_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m1_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m1_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat 
noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64( 
[[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m8_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m8_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tumu(vbool64_t mask, 
vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfnmsub_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return 
__riscv_vfnmsub_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m1_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m1_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat 
[[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m8_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m8_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrec7-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrec7-bf16.c new file mode 100644 index 0000000000000..3123692b45a68 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrec7-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target 
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrec7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16mf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrec7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16mf2_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrec7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m1_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfrec7.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrec7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m2_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrec7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrec7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m8_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrec7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16mf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrec7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16mf2_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrec7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m1_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrec7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m2_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrec7_v_bf16m4_tum(vbool4_t mask, 
vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrec7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m8_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrec7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16mf4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrec7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16mf2_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrec7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m1_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrec7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m2_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrec7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrec7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t 
maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m8_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrec7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16mf4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrec7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16mf2_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrec7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m1_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrec7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m2_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrec7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrec7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m8_mu(mask, maskedoff, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrsqrt7-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrsqrt7-bf16.c new file mode 100644 index 0000000000000..8436f0ea488b2 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrsqrt7-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: 
riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16mf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16mf2_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m1_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m2_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m8_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16mf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16mf2_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m1_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m2_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t 
test_vfrsqrt7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m8_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16mf4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16mf2_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m1_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m2_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t 
test_vfrsqrt7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m8_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16mf4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16mf2_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsqrt7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m1_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsqrt7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m2_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsqrt7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsqrt7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m8_mu(mask, maskedoff, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrsub-bf16.c new file mode 100644 index 0000000000000..7dd2bb6cc502d --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrsub-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by 
utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) 
{ + return __riscv_vfrsub_vf_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t 
test_vfrsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, 
__bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnj-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnj-bf16.c new file mode 100644 index 0000000000000..b39a0be2238a9 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnj-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck 
--check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], 
[[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return 
__riscv_vfsgnj_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfsgnj_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1_tumu(vbool16_t mask, 
vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m4_tumu(mask, maskedoff, op1, op2, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t 
op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnjn-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnjn-bf16.c new file mode 100644 index 0000000000000..7542e7866c888 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnjn-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: 
-emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t 
op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t 
test_vfsgnjn_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + 
return __riscv_vfsgnjn_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfsgnjn_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t 
maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m8_tumu(mask, 
maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2_mu( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], 
bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnjx-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnjx-bf16.c new file mode 100644 index 0000000000000..104149e1b2fa4 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnjx-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: 
--version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16mf2_tu(maskedoff, op1, op2, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m4_tu(maskedoff, op1, 
op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], 
[[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t 
test_vfsgnjx_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return 
__riscv_vfsgnjx_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfsgnjx_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], 
bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], 
[[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_mu(vbool32_t mask, 
vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m1_mu(mask, 
maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfslide1down-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfslide1down-bf16.c new file mode 100644 
index 0000000000000..228dc1cd064a8 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfslide1down-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16mf4_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16mf2_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1down_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m1_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1down_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m2_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1down_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m4_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t 
test_vfslide1down_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m8_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16mf4_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16mf2_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t 
test_vfslide1down_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m1_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1down_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m2_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1down_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m4_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t 
test_vfslide1down_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m8_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16mf4_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16mf2_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t 
test_vfslide1down_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m1_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1down_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m2_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1down_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m4_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t 
test_vfslide1down_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m8_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16mf4_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16mf2_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t 
test_vfslide1down_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m1_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1down_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m2_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1down_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m4_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t 
test_vfslide1down_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m8_mu(mask, maskedoff, src, value, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfslide1up-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfslide1up-bf16.c new file mode 100644 index 0000000000000..9e6ff2b16f77d --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfslide1up-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16mf4_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t 
test_vfslide1up_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16mf2_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1up_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m1_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1up_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m2_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1up_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m4_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfslide1up_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1up_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m8_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16mf4_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16mf2_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1up_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m1_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1up_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m2_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1up_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m4_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1up_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m8_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16mf4_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16mf2_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1up_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m1_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1up_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m2_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1up_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m4_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1up_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m8_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16mf4_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16mf2_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1up_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m1_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1up_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m2_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1up_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m4_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64( 
[[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1up_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m8_mu(mask, maskedoff, src, value, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsub-bf16.c new file mode 100644 index 0000000000000..b6fd94ece20cd --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsub-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 
[[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1_tu( +// 
CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vbfloat16m4_t test_vfsub_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], 
[[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t 
test_vfsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return 
__riscv_vfsub_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfsub_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 
[[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, 
vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat 
noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwadd-bf16.c new file mode 100644 index 0000000000000..4bee376cfe0fb --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwadd-bf16.c @@ -0,0 +1,2007 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + 
vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16m1_f32m2_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return 
__riscv_vfwadd_wf_bf16_f32m2_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.w.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8_tu(vd, vs2, vs1, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], 
[[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tum(vbool16_t vm, 
vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m1_f32m2_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2_tum(vm, vd, vs2, rs1, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( 
[[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwadd_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return 
__riscv_vfwadd_vv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_tumu( 
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m1_f32m2_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + 
vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfwadd_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( 
[[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m1_f32m2_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_mu(vbool16_t vm, 
vfloat32m2_t vd, + vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_mu(vm, vd, vs2, rs1, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return 
__riscv_vfwadd_wf_bf16_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef 
[[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m1_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwadd_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwadd_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t 
vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + 
return __riscv_vfwadd_vf_bf16m1_f32m2_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwadd_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm_tum( +// CHECK-RV64-SAME: 
[[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], 
[[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], 
[[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwadd_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwadd_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + 
vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return 
__riscv_vfwadd_vf_bf16m1_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm_mu( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef 
[[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwcvt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwcvt-bf16.c new file mode 100644 index 0000000000000..9151319fcfb17 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwcvt-bf16.c @@ -0,0 +1,765 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tu(vbfloat16mf4_t vd, vint8mf8_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tu(vbfloat16mf2_t vd, vint8mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tu(vbfloat16m1_t vd, vint8mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m1_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tu(vbfloat16m2_t vd, vint8m1_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tu(vbfloat16m4_t vd, vint8m2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tu(vbfloat16m8_t vd, vint8m4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m8_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tu(vbfloat16mf4_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tu(vbfloat16mf2_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tu(vbfloat16m1_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m1_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tu(vbfloat16m2_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tu(vbfloat16m4_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tu(vbfloat16m8_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m8_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vint8m1_t vs2, size_t vl) { + return 
__riscv_vfwcvt_f_x_v_bf16m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m8_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64( 
[[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8_tum( +// CHECK-RV64-SAME: 
[[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m8_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + vint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + vint8m2_t vs2, size_t vl) { 
+ return __riscv_vfwcvt_f_x_v_bf16m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m8_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m8_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64( 
[[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1_mu( 
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + vint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t 
vd, + vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m8_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m8_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 
[[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_mu(vm, vd, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmacc-bf16.c new file mode 100644 index 0000000000000..f67b1001a5fcb --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmacc-bf16.c @@ -0,0 +1,1017 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_tu( +// 
CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], 
i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t 
vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmacc_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t 
test_vfwmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + 
return __riscv_vfwmacc_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmacc_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], 
[[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t 
test_vfwmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} 
+ +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmsac-bf16.c new file mode 100644 index 0000000000000..6d78c74e14694 --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmsac-bf16.c @@ -0,0 +1,1017 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], 
i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, 
vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfwmsac_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], 
[[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return 
__riscv_vfwmsac_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmsac_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, 
i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + 
return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmsac_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm_tum( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], 
bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmul-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmul-bf16.c new file mode 100644 index 0000000000000..9fcfe818be71f --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmul-bf16.c @@ -0,0 +1,1015 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, + 
vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tum(vbool64_t vm, 
vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return 
__riscv_vfwmul_vf_bf16mf2_f32m1_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_tum( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, 
__bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfwmul_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t 
test_vfwmul_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], 
[[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], 
[[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwmul_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t 
test_vfwmul_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + 
vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + 
return __riscv_vfwmul_vf_bf16m4_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmacc-bf16.c new file mode 100644 index 0000000000000..73cc82219b201 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmacc-bf16.c @@ -0,0 +1,1034 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return 
__riscv_vfwnmacc_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t 
test_vfwnmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return 
__riscv_vfwnmacc_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwnmacc_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( 
[[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + 
vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], 
bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return 
__riscv_vfwnmacc_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum( 
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: 
[[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat 
noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, + vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat 
noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, + vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmsac-bf16.c new file mode 100644 index 
0000000000000..6133230e5cd84 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmsac-bf16.c @@ -0,0 +1,1034 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t 
vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 
7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, 
vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return 
__riscv_vfwnmsac_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfwnmsac_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], 
[[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t 
test_vfwnmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return 
__riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwnmsac_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], 
i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, 
i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, + vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, + vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t 
test_vfwnmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t 
test_vfwnmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwsub-bf16.c new file mode 100644 index 0000000000000..9d9b0b0aa2f3c --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwsub-bf16.c @@ -0,0 +1,2007 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// 
REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl); +} + 
+// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64( [[VD]], 
[[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwsub_wv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], 
bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwsub_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + 
return __riscv_vfwsub_wf_bf16_f32mf2_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_tum( 
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + 
return __riscv_vfwsub_vv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_tumu( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( 
[[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t 
test_vfwsub_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_tumu(vm, 
vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], 
[[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, 
__bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_mu( 
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return 
__riscv_vfwsub_wv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], 
[[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat 
[[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, 
__RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t 
test_vfwsub_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwsub_wf_bf16_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], 
[[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], 
[[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + 
vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + 
vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { 
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_rm_tumu(vm, vd, 
vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfwsub_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm_tumu( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], 
bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], 
bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, 
vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfeq-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfeq-bf16.c new file mode 100644 index 0000000000000..b96aae5615db8 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfeq-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// 
RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16mf2_b32_mu(mask, 
maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfge-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfge-bf16.c new file mode 100644 index 0000000000000..47d0427abf99b --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfge-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by 
utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t 
test_vmfge_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return 
__riscv_vmfge_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfgt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfgt-bf16.c new file mode 100644 index 0000000000000..0a0ead22361e9 --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfgt-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbool16_t test_vmfgt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfle-bf16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfle-bf16.c new file mode 100644 index 0000000000000..27ddefec8c9c0 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfle-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t 
test_vmfle_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m8_b2_mu(mask, maskedoff, op1, 
op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmflt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmflt-bf16.c new file mode 100644 index 0000000000000..d5f4f777580d3 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmflt-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16mf4_b64_mu(mask, maskedoff, op1, 
op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat 
noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64( 
[[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, 
vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfne-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfne-bf16.c new file mode 100644 index 0000000000000..c2df9474acc72 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfne-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, 
vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vmfne_vf_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd-bf16.c new file mode 100644 index 0000000000000..2bd3b3995c6c8 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, 
vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], 
[[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t 
test_vfadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, 
op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { 
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8_tumu( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], 
i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + 
return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfclass-bf16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfclass-bf16.c new file mode 100644 index 0000000000000..e2a993aca7069 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfclass-bf16.c @@ -0,0 +1,272 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf4_u16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tu(vuint16mf4_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf2_u16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tu(vuint16mf2_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m1_u16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t 
test_vfclass_v_bf16m1_u16m1_tu(vuint16m1_t vd, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfclass_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m2_u16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfclass_v_bf16m2_u16m2_tu(vuint16m2_t vd, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfclass_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m4_u16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfclass_v_bf16m4_u16m4_tu(vuint16m4_t vd, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfclass_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m8_u16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv32bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfclass_v_bf16m8_u16m8_tu(vuint16m8_t vd, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfclass_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf4_u16mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + 
vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf2_u16mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m1_u16m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfclass_v_bf16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfclass_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m2_u16m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfclass_v_bf16m2_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfclass_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m4_u16m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfclass_v_bf16m4_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfclass_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m8_u16m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfclass_v_bf16m8_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfclass_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf4_u16mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf2_u16mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m1_u16m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfclass_v_bf16m1_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfclass_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m2_u16m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfclass_v_bf16m2_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfclass_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m4_u16m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfclass_v_bf16m4_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfclass_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m8_u16m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfclass_v_bf16m8_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfclass_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf4_u16mf4_mu( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf2_u16mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m1_u16m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfclass_v_bf16m1_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfclass_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m2_u16m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfclass_v_bf16m2_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vbfloat16m2_t vs2, size_t vl) { + return 
__riscv_vfclass_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m4_u16m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfclass_v_bf16m4_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfclass_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m8_u16m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfclass_v_bf16m8_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfclass_mu(vm, vd, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmacc-bf16.c new file mode 100644 index 0000000000000..eb7427107c942 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmacc-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return 
__riscv_vfmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tum(vbool32_t mask, 
vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2_tum( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64( [[VD]], 
bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return 
__riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +} + 
+// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t 
test_vfmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4_mu( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmadd-bf16.c new file mode 100644 index 0000000000000..68d490d04ff86 --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmadd-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t 
test_vfmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8_tu( +// CHECK-RV64-SAME: 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfmadd_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vf_bf16m8_tum(vbool2_t mask, 
vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64( 
[[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, 
size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmax-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmax-bf16.c new file mode 100644 index 0000000000000..5f682e80b61b5 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmax-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return 
__riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64( 
[[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t 
test_vfmax_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], 
bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, 
vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfmax_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + 
return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], 
bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmerge-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmerge-bf16.c new file mode 100644 index 0000000000000..9593ad5b5a592 --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmerge-bf16.c @@ -0,0 +1,69 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmerge_vfm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) { + return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmerge_vfm_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) { + return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmerge.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmerge_vfm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) { + return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmerge_vfm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) { + return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmerge_vfm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) { + return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t 
test_vfmerge_vfm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) { + return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmin-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmin-bf16.c new file mode 100644 index 0000000000000..f3ef3c3614027 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmin-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return 
__riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4bf16.bf16.i64( 
[[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], 
[[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4_tum(vbool64_t 
mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 
[[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, 
__bf16 op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1_tumu( +// 
CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return 
__riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsac-bf16.c new file mode 100644 index 0000000000000..0587c57af2b06 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsac-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: 
--version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2_tu( +// 
CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, 
vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], 
[[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfmsac_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, 
vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64( [[VD]], 
bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + 
return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vf_bf16m8_mu(vbool2_t mask, 
vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsub-bf16.c new file mode 100644 index 0000000000000..2ad26f8b24e12 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsub-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfmsub_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t 
test_vfmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsub.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfmsub_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2_tum(vbool8_t mask, 
vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], 
bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], 
[[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t 
vl) { + return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +} 
+ +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8_mu(vbool2_t mask, 
vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmul-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmul-bf16.c new file mode 100644 index 0000000000000..d1e726a9d63ea --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmul-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return 
__riscv_vfmul_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfmul_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return 
__riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2_tum( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, 
vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfmul_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], 
[[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1_mu(vbool16_t mask, 
vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfmul_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmv-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmv-bf16.c new file mode 100644 index 0000000000000..9fd1ffce446bb --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmv-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv1bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmv_v_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv2bf16.i64( [[MASKEDOFF]], bfloat 
[[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmv_v_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv4bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmv_v_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv8bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmv_v_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv16bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmv_v_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv32bf16.i64( 
[[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmv_v_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmv_s_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmv_s_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmv_s_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmv.s.f.nxv8bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmv_s_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv16bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmv_s_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv32bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmv_s_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_tu(maskedoff, src, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt-bf16.c new file mode 100644 index 0000000000000..c6cd0a55fa530 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt-bf16.c @@ -0,0 +1,1539 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck 
--check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2, + size_t vl) { + 
return __riscv_vfncvt_x_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd, + vbfloat16mf2_t vs2, size_t vl) { 
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd, vbfloat16m8_t vs2, + size_t 
vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tu(vbfloat16m1_t vd, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tu(vbfloat16m2_t vd, vfloat32m4_t vs2, + size_t vl) { + 
return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tu(vbfloat16m4_t vd, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], 
i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + 
vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t 
vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return 
__riscv_vfncvt_xu_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], 
[[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(vint8m1_t vd, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(vint8m2_t vd, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(vint8m4_t vd, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu( +// 
CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, 
vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t 
test_vfncvt_f_f_w_bf16mf2_rm_tu(vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tu(vbfloat16m1_t vd, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tu(vbfloat16m2_t vd, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tu(vbfloat16m4_t vd, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return 
__riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(vbool64_t vm, + vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(vbool32_t vm, + vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(vbool4_t vm, vuint8m2_t 
vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tum(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tum(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tum(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t 
test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(vbool64_t vm, + vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(vbool32_t vm, + vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(vbool16_t vm, + vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tumu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tumu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tumu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tumu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tumu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return 
__riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(vbool64_t vm, vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(vbool32_t vm, vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + 
return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_mu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_mu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, 
size_t vl) { + return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt_rod-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt_rod-bf16.c new file mode 100644 index 0000000000000..0745633042d44 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt_rod-bf16.c @@ -0,0 +1,233 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// 
CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tu(vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tu(vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return 
__riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tu(vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tum(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tum(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], 
[[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tumu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfncvt_rod_f_f_w_bf16mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tumu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], 
[[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt_rtz-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt_rtz-bf16.c new file mode 100644 index 0000000000000..b906c5f411064 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt_rtz-bf16.c @@ -0,0 +1,572 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm, + vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm, + vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm, + vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t 
test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, 
vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm, + vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm, + vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm, + vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t 
vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm, + vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm, + vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, 
vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmacc-bf16.c new file mode 100644 index 0000000000000..cc487b49429dd --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmacc-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfnmacc_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t 
test_vfnmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfnmacc_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t 
test_vfnmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfnmacc_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t 
test_vfnmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfnmacc_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmadd-bf16.c new file mode 100644 index 0000000000000..f9c348b3dbb0b --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmadd-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 
rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64( 
[[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, 
rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); 
+} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vbfloat16m8_t test_vfnmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t 
test_vfnmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +} + diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsac-bf16.c new file mode 100644 index 0000000000000..83d35e81403ce --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsac-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, 
rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef 
[[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], 
[[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return 
__riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return 
__riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, 
rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t 
test_vfnmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsub-bf16.c new file mode 100644 index 0000000000000..f5282a195131d --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsub-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, 
size_t vl) { + return __riscv_vfnmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64( [[VD]], 
[[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return 
__riscv_vfnmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); 
+} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl); +} + 
+// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfnmsub_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrec7-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrec7-bf16.c new file mode 100644 index 0000000000000..f8e5a339c87d7 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrec7-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: 
riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrec7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrec7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrec7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrec7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrec7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrec7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfrec7.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrec7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrec7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrec7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrec7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrec7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrec7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrec7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrec7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrec7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrec7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrec7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return 
__riscv_vfrec7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrec7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrec7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrec7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfrec7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrec7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrec7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrec7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrec7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrec7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrec7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrec7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], 
i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrec7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrec7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrec7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsqrt7-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsqrt7-bf16.c new file mode 100644 index 0000000000000..7c6c926e50c10 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsqrt7-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S 
-passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tu(vbfloat16m2_t maskedoff, 
vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t 
test_vfrsqrt7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsqrt7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsqrt7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) 
{ + return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsqrt7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsqrt7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsub-bf16.c new file mode 100644 index 0000000000000..c09caeb8207af --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsub-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4_tu( +// 
CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfrsub_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, 
vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfrsub_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnj-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnj-bf16.c new file mode 100644 index 0000000000000..c1f69932f6a02 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnj-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf4_tu( +// CHECK-RV64-SAME: 
[[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t 
test_vfsgnj_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return 
__riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, 
maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return 
__riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t 
test_vfsgnj_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjn-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjn-bf16.c new file mode 100644 index 0000000000000..1b799d87d8131 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjn-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + 
return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfsgnjn_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 
2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, 
vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfsgnjn_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], 
[[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tumu(vbool16_t 
mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); 
+} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat 
noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return 
__riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjx-bf16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjx-bf16.c new file mode 100644 index 0000000000000..9c5f2af3d6e8f --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjx-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, 
op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], 
bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfsgnjx_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], 
[[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t 
maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat 
noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + 
return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2_mu( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1down-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1down-bf16.c new file mode 100644 index 0000000000000..691302e245427 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1down-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have 
been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t 
test_vfslide1down_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1down_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1down_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1down_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfslide1down_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1down_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m2_tum( +// 
CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1down_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1down_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1down_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1down_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef 
[[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1down_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1down_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1down_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1down_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1down_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1down_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1down_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1up-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1up-bf16.c new file mode 100644 index 0000000000000..1238d2204c6d4 --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1up-bf16.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], 
bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1up_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1up_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1up_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1up_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1up_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m2_tum( 
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1up_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1up_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1up_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat 
noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1up_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1up_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1up_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1up_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1up_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], 
bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1up_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1up_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1up_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsub-bf16.c new file mode 100644 index 0000000000000..ea4f8f043d6bb --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsub-bf16.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by 
utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, 
op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], 
[[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8_tu( +// CHECK-RV64-SAME: 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], 
[[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, 
vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfsub_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], 
[[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t 
test_vfsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, 
op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t 
test_vfsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, 
op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwadd-bf16.c new file mode 100644 index 0000000000000..e5b7b8da1f3cc --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwadd-bf16.c @@ -0,0 +1,1932 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat 
[[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], 
bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vbfloat16m1_t 
vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwadd_wv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], 
bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + 
vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: 
[[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tum(vbool8_t 
vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwadd_wv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return 
__riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], 
bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( 
[[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tumu(vbool4_t vm, 
vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwadd_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return 
__riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + 
return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + 
vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], 
[[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat 
[[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + 
vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, 
__RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm_tumu( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( 
[[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + 
vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat 
noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + 
vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwcvt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwcvt-bf16.c new file mode 100644 index 0000000000000..730010421b944 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwcvt-bf16.c @@ -0,0 +1,765 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tu(vbfloat16mf4_t vd, vint8mf8_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfwcvt_f_x_v_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tu(vbfloat16mf2_t vd, vint8mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tu(vbfloat16m1_t vd, vint8mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tu(vbfloat16m2_t vd, vint8m1_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tu(vbfloat16m4_t vd, vint8m2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8_tu( +// 
CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tu(vbfloat16m8_t vd, vint8m4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tu(vbfloat16mf4_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tu(vbfloat16mf2_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tu(vbfloat16m1_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tu(vbfloat16m2_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tu(vbfloat16m4_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tu(vbfloat16m8_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vint8m1_t vs2, size_t vl) { + return 
__riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 
[[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, 
size_t vl) { + return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + vint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8_tumu( 
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t 
test_vfwcvt_f_xu_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tumu(vm, vd, vs2, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + vint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwcvt_f_xu_v_bf16m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t 
test_vfwcvt_f_f_v_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmacc-bf16.c new file mode 100644 
index 0000000000000..b05f88028874d --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmacc-bf16.c @@ -0,0 +1,977 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], 
[[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmacc_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], 
i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return 
__riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return 
__riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], 
bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t 
test_vfwmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfwmacc_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t 
test_vfwmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm_tu( +// 
CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + 
vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, 
__RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmsac-bf16.c new file mode 100644 index 0000000000000..93721f6a889d9 --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmsac-bf16.c @@ -0,0 +1,977 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t 
test_vfwmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], 
[[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, 
+ vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmsac_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 
[[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + 
size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], 
bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], 
[[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, 
vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], 
[[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t 
vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, 
__RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmsac_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 
[[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmul-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmul-bf16.c new file mode 100644 index 0000000000000..4a2b5e39ef2cf --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmul-bf16.c @@ -0,0 +1,975 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: 
riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmul_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], 
bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + 
vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_tumu( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, 
vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, 
i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + 
return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, + 
vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], 
i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tum(vbool4_t 
vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return 
__riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfwmul_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat 
noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat 
[[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_mu(vbool4_t 
vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmacc-bf16.c new file mode 100644 index 0000000000000..57e433441cd40 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmacc-bf16.c @@ -0,0 +1,994 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + 
vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + 
vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return 
__riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_tumu( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t 
vs2, + size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: 
[[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t 
test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwnmacc_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], 
bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t 
vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return 
__riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, + vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, + vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], 
[[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmsac-bf16.c new file mode 100644 index 0000000000000..42da060126a2b --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmsac-bf16.c @@ -0,0 +1,994 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], 
[[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], 
[[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwnmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + 
return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: 
[[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return 
__riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], 
bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, 
vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], 
[[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + 
vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, + vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, + vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, + size_t 
vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef 
[[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwsub-bf16.c new file mode 100644 index 0000000000000..1378bc963b216 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwsub-bf16.c @@ -0,0 +1,1932 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return 
__riscv_vfwsub_wf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64( 
[[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], 
[[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + 
vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + 
size_t vl) { + return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], 
[[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t 
test_vfwsub_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfwsub_wf_bf16_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return 
__riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], 
[[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t 
test_vfwsub_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl); +} 
+ +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], 
i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return 
__riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef 
[[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], 
i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { 
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t 
test_vfwsub_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: 
[[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm_tu( +// 
CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + 
return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwsub_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t 
vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm_tumu( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + 
vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, 
__RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm_mu( 
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( 
[[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t 
test_vfwsub_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return 
__riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwsub_wf_bf16_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfeq-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfeq-bf16.c new file mode 100644 index 0000000000000..3945f826809b0 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfeq-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf4_b64_mu( 
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t 
maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfge-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfge-bf16.c new file mode 100644 index 0000000000000..82586da09b325 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfge-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], 
[[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return 
__riscv_vmfge_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfgt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfgt-bf16.c new file mode 100644 index 0000000000000..75ccbbc1e8a1e --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfgt-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbool16_t test_vmfgt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vmfgt_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfle-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfle-bf16.c new file mode 100644 index 0000000000000..49ff1c9812d62 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfle-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t 
maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmflt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmflt-bf16.c new file mode 100644 index 0000000000000..24b3f9c16d943 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmflt-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64( 
[[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, 
size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], 
[[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfne-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfne-bf16.c new file mode 100644 index 0000000000000..ca3e134910dfb --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfne-bf16.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vbool64_t test_vmfne_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t 
test_vmfne_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + From a622f6ec4e04bcc5ec8fa681e679adad1c8c7e44 Mon Sep 17 00:00:00 2001 From: Brandon Wu Date: Mon, 29 Sep 2025 17:54:08 +0800 Subject: [PATCH 03/11] fixup! [RISCV][llvm] Support Zvfbfa codegen and vsetvli insertion --- .../RISCV/rvv/mixed-float-bf16-arith.ll | 24 ++++++++-------- llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll | 24 ++++++++-------- llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll | 12 ++++---- llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll | 24 ++++++++-------- llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll | 24 ++++++++-------- llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll | 24 ++++++++-------- llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll | 24 ++++++++-------- llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll | 24 ++++++++-------- .../test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll | 10 +++---- .../test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll | 12 ++++---- .../CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll | 12 ++++---- llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll | 12 ++++---- llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll | 12 ++++---- llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll | 12 ++++---- llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll | 12 ++++---- llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll | 12 ++++---- llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll | 24 ++++++++-------- llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll | 24 ++++++++-------- llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll | 24 ++++++++-------- .../test/CodeGen/RISCV/rvv/vfslide1down-bf.ll | 12 ++++---- llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll | 12 ++++---- llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll | 24 ++++++++-------- llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll | 20 ++++++------- llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll | 28 +++++++++---------- llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll | 12 ++++---- llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll | 12 ++++---- llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll | 
20 ++++++------- llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll | 20 ++++++------- llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll | 28 +++++++++---------- 29 files changed, 267 insertions(+), 267 deletions(-) diff --git a/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll b/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll index 1498ac9ca48e9..145fc5dee4b4c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll @@ -35,13 +35,13 @@ define @test_half_bf16( %0, @llvm.riscv.vfadd.nxv1f16.nxv1f16( - undef, + poison, %3, %4, iXLen 0, iXLen %2) %b = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -64,13 +64,13 @@ define @test_i32_bf16( %0, @llvm.riscv.vadd.nxv1i32.nxv1i32( - undef, + poison, %3, %4, iXLen %2) %b = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -96,19 +96,19 @@ define @test_half_bf16_half( %0, @llvm.riscv.vfadd.nxv1f16.nxv1f16( - undef, + poison, %3, %4, iXLen 0, iXLen %2) %b = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) %c = call @llvm.riscv.vfadd.nxv1f16.nxv1f16( - undef, + poison, %a, %4, iXLen 0, iXLen %2) @@ -134,19 +134,19 @@ define @test_bf16_half_bf16( %0, @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) %b = call @llvm.riscv.vfadd.nxv1f16.nxv1f16( - undef, + poison, %3, %4, iXLen 0, iXLen %2) %c = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( - undef, + poison, %a, %1, iXLen 0, iXLen %2) @@ -169,13 +169,13 @@ define @test_bf16_i16( %0, @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) %b = call @llvm.riscv.vadd.nxv1i16.nxv1i16( - undef, + poison, %3, %4, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll index 7a9c6990b6137..db1b081258d5f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll 
@@ -20,7 +20,7 @@ define @intrinsic_vfadd_vv_nxv1bf16_nxv1bf16_nxv1bf16( @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -70,7 +70,7 @@ define @intrinsic_vfadd_vv_nxv2bf16_nxv2bf16_nxv2bf16( @llvm.riscv.vfadd.nxv2bf16.nxv2bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -120,7 +120,7 @@ define @intrinsic_vfadd_vv_nxv4bf16_nxv4bf16_nxv4bf16( @llvm.riscv.vfadd.nxv4bf16.nxv4bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -170,7 +170,7 @@ define @intrinsic_vfadd_vv_nxv8bf16_nxv8bf16_nxv8bf16( @llvm.riscv.vfadd.nxv8bf16.nxv8bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -220,7 +220,7 @@ define @intrinsic_vfadd_vv_nxv16bf16_nxv16bf16_nxv16bf16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv16bf16.nxv16bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -270,7 +270,7 @@ define @intrinsic_vfadd_vv_nxv32bf16_nxv32bf16_nxv32bf16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv32bf16.nxv32bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -321,7 +321,7 @@ define @intrinsic_vfadd_vf_nxv1bf16_nxv1bf16_bf16( @llvm.riscv.vfadd.nxv1bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -371,7 +371,7 @@ define @intrinsic_vfadd_vf_nxv2bf16_nxv2bf16_bf16( @llvm.riscv.vfadd.nxv2bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -421,7 +421,7 @@ define @intrinsic_vfadd_vf_nxv4bf16_nxv4bf16_bf16( @llvm.riscv.vfadd.nxv4bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -471,7 +471,7 @@ define @intrinsic_vfadd_vf_nxv8bf16_nxv8bf16_bf16( @llvm.riscv.vfadd.nxv8bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -521,7 +521,7 @@ define @intrinsic_vfadd_vf_nxv16bf16_nxv16bf16_bf16( @llvm.riscv.vfadd.nxv16bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -571,7 +571,7 @@ define @intrinsic_vfadd_vf_nxv32bf16_nxv32bf16_bf16( @llvm.riscv.vfadd.nxv32bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll index c9e9d75e3e34a..d7d49b379b5a4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll @@ -19,7 +19,7 @@ define @intrinsic_vfclass_v_nxv1i16_nxv1bf16( iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv1i16.nxv1bf16( - undef, + poison, %0, iXLen %1) @@ -67,7 +67,7 @@ define @intrinsic_vfclass_v_nxv2i16_nxv2bf16( iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv2i16.nxv2bf16( - undef, + poison, %0, iXLen %1) @@ -115,7 +115,7 @@ define @intrinsic_vfclass_v_nxv4i16_nxv4bf16( iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv4i16.nxv4bf16( - undef, + poison, %0, iXLen %1) @@ -163,7 +163,7 @@ define @intrinsic_vfclass_v_nxv8i16_nxv8bf16( iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv8i16.nxv8bf16( - undef, + poison, %0, iXLen %1) @@ -211,7 +211,7 @@ define @intrinsic_vfclass_v_nxv16i16_nxv16bf16( iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv16i16.nxv16bf16( - undef, + poison, %0, iXLen %1) @@ -259,7 +259,7 @@ define @intrinsic_vfclass_v_nxv32i16_nxv32bf16( iXLen %1) nounwind { entry: %a = call @llvm.riscv.vfclass.nxv32i16.nxv32bf16( - undef, + poison, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll index b419c02b63384..a337d3061ce78 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll @@ -18,7 +18,7 @@ define @intrinsic_vfmax_vv_nxv1bf16_nxv1bf16_nxv1bf16( @llvm.riscv.vfmax.nxv1bf16.nxv1bf16( - undef, + poison, %0, %1, iXLen %2) @@ -65,7 +65,7 @@ define @intrinsic_vfmax_vv_nxv2bf16_nxv2bf16_nxv2bf16( @llvm.riscv.vfmax.nxv2bf16.nxv2bf16( - undef, + poison, %0, %1, iXLen %2) @@ -112,7 +112,7 @@ define @intrinsic_vfmax_vv_nxv4bf16_nxv4bf16_nxv4bf16( @llvm.riscv.vfmax.nxv4bf16.nxv4bf16( - undef, + poison, %0, %1, iXLen %2) @@ -159,7 +159,7 @@ define 
@intrinsic_vfmax_vv_nxv8bf16_nxv8bf16_nxv8bf16( @llvm.riscv.vfmax.nxv8bf16.nxv8bf16( - undef, + poison, %0, %1, iXLen %2) @@ -206,7 +206,7 @@ define @intrinsic_vfmax_vv_nxv16bf16_nxv16bf16_nxv16bf16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv16bf16.nxv16bf16( - undef, + poison, %0, %1, iXLen %2) @@ -253,7 +253,7 @@ define @intrinsic_vfmax_vv_nxv32bf16_nxv32bf16_nxv32bf16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv32bf16.nxv32bf16( - undef, + poison, %0, %1, iXLen %2) @@ -301,7 +301,7 @@ define @intrinsic_vfmax_vf_nxv1bf16_nxv1bf16_bf16( @llvm.riscv.vfmax.nxv1bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -348,7 +348,7 @@ define @intrinsic_vfmax_vf_nxv2bf16_nxv2bf16_bf16( @llvm.riscv.vfmax.nxv2bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -395,7 +395,7 @@ define @intrinsic_vfmax_vf_nxv4bf16_nxv4bf16_bf16( @llvm.riscv.vfmax.nxv4bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -442,7 +442,7 @@ define @intrinsic_vfmax_vf_nxv8bf16_nxv8bf16_bf16( @llvm.riscv.vfmax.nxv8bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -489,7 +489,7 @@ define @intrinsic_vfmax_vf_nxv16bf16_nxv16bf16_bf16( @llvm.riscv.vfmax.nxv16bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -536,7 +536,7 @@ define @intrinsic_vfmax_vf_nxv32bf16_nxv32bf16_bf16( @llvm.riscv.vfmax.nxv32bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll index 00a034f9b8595..86ba7c7fb7fe6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll @@ -19,7 +19,7 @@ define @intrinsic_vfmerge_vfm_nxv1bf16_nxv1bf16_bf16( @llvm.riscv.vfmerge.nxv1bf16.bf16( - undef, + poison, %0, bfloat %1, %2, @@ -43,7 +43,7 @@ define @intrinsic_vfmerge_vfm_nxv2bf16_nxv2bf16_bf16( @llvm.riscv.vfmerge.nxv2bf16.bf16( - undef, + poison, %0, bfloat %1, %2, @@ -67,7 +67,7 @@ define @intrinsic_vfmerge_vfm_nxv4bf16_nxv4bf16_bf16( 
@llvm.riscv.vfmerge.nxv4bf16.bf16( - undef, + poison, %0, bfloat %1, %2, @@ -91,7 +91,7 @@ define @intrinsic_vfmerge_vfm_nxv8bf16_nxv8bf16_bf16( @llvm.riscv.vfmerge.nxv8bf16.bf16( - undef, + poison, %0, bfloat %1, %2, @@ -115,7 +115,7 @@ define @intrinsic_vfmerge_vfm_nxv16bf16_nxv16bf16_bf16( @llvm.riscv.vfmerge.nxv16bf16.bf16( - undef, + poison, %0, bfloat %1, %2, @@ -139,7 +139,7 @@ define @intrinsic_vfmerge_vfm_nxv32bf16_nxv32bf16_bf16( @llvm.riscv.vfmerge.nxv32bf16.bf16( - undef, + poison, %0, bfloat %1, %2, @@ -157,7 +157,7 @@ define @intrinsic_vfmerge_vzm_nxv1bf16_nxv1bf16_bf16( @llvm.riscv.vfmerge.nxv1bf16.bf16( - undef, + poison, %0, bfloat zeroinitializer, %1, @@ -175,7 +175,7 @@ define @intrinsic_vfmerge_vzm_nxv2bf16_nxv2bf16_bf16( @llvm.riscv.vfmerge.nxv2bf16.bf16( - undef, + poison, %0, bfloat zeroinitializer, %1, @@ -193,7 +193,7 @@ define @intrinsic_vfmerge_vzm_nxv4bf16_nxv4bf16_bf16( @llvm.riscv.vfmerge.nxv4bf16.bf16( - undef, + poison, %0, bfloat zeroinitializer, %1, @@ -211,7 +211,7 @@ define @intrinsic_vfmerge_vzm_nxv8bf16_nxv8bf16_bf16( @llvm.riscv.vfmerge.nxv8bf16.bf16( - undef, + poison, %0, bfloat zeroinitializer, %1, @@ -229,7 +229,7 @@ define @intrinsic_vfmerge_vzm_nxv16bf16_nxv16bf16_bf16( @llvm.riscv.vfmerge.nxv16bf16.bf16( - undef, + poison, %0, bfloat zeroinitializer, %1, @@ -247,7 +247,7 @@ define @intrinsic_vfmerge_vzm_nxv32bf16_nxv32bf16_bf16( @llvm.riscv.vfmerge.nxv32bf16.bf16( - undef, + poison, %0, bfloat zeroinitializer, %1, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll index 486d43b2e9d49..37c0cf506a6fa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll @@ -18,7 +18,7 @@ define @intrinsic_vfmin_vv_nxv1bf16_nxv1bf16_nxv1bf16( @llvm.riscv.vfmin.nxv1bf16.nxv1bf16( - undef, + poison, %0, %1, iXLen %2) @@ -65,7 +65,7 @@ define @intrinsic_vfmin_vv_nxv2bf16_nxv2bf16_nxv2bf16( @llvm.riscv.vfmin.nxv2bf16.nxv2bf16( - undef, + poison, %0, %1, 
iXLen %2) @@ -112,7 +112,7 @@ define @intrinsic_vfmin_vv_nxv4bf16_nxv4bf16_nxv4bf16( @llvm.riscv.vfmin.nxv4bf16.nxv4bf16( - undef, + poison, %0, %1, iXLen %2) @@ -159,7 +159,7 @@ define @intrinsic_vfmin_vv_nxv8bf16_nxv8bf16_nxv8bf16( @llvm.riscv.vfmin.nxv8bf16.nxv8bf16( - undef, + poison, %0, %1, iXLen %2) @@ -206,7 +206,7 @@ define @intrinsic_vfmin_vv_nxv16bf16_nxv16bf16_nxv16bf16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv16bf16.nxv16bf16( - undef, + poison, %0, %1, iXLen %2) @@ -253,7 +253,7 @@ define @intrinsic_vfmin_vv_nxv32bf16_nxv32bf16_nxv32bf16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv32bf16.nxv32bf16( - undef, + poison, %0, %1, iXLen %2) @@ -301,7 +301,7 @@ define @intrinsic_vfmin_vf_nxv1bf16_nxv1bf16_bf16( @llvm.riscv.vfmin.nxv1bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -348,7 +348,7 @@ define @intrinsic_vfmin_vf_nxv2bf16_nxv2bf16_bf16( @llvm.riscv.vfmin.nxv2bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -395,7 +395,7 @@ define @intrinsic_vfmin_vf_nxv4bf16_nxv4bf16_bf16( @llvm.riscv.vfmin.nxv4bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -442,7 +442,7 @@ define @intrinsic_vfmin_vf_nxv8bf16_nxv8bf16_bf16( @llvm.riscv.vfmin.nxv8bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -489,7 +489,7 @@ define @intrinsic_vfmin_vf_nxv16bf16_nxv16bf16_bf16( @llvm.riscv.vfmin.nxv16bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -536,7 +536,7 @@ define @intrinsic_vfmin_vf_nxv32bf16_nxv32bf16_bf16( @llvm.riscv.vfmin.nxv32bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll index e1914f4c826a8..44bce723c39d4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll @@ -20,7 +20,7 @@ define @intrinsic_vfmul_vv_nxv1bf16_nxv1bf16_nxv1bf16( @llvm.riscv.vfmul.nxv1bf16.nxv1bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -70,7 +70,7 @@ define 
@intrinsic_vfmul_vv_nxv2bf16_nxv2bf16_nxv2bf16( @llvm.riscv.vfmul.nxv2bf16.nxv2bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -120,7 +120,7 @@ define @intrinsic_vfmul_vv_nxv4bf16_nxv4bf16_nxv4bf16( @llvm.riscv.vfmul.nxv4bf16.nxv4bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -170,7 +170,7 @@ define @intrinsic_vfmul_vv_nxv8bf16_nxv8bf16_nxv8bf16( @llvm.riscv.vfmul.nxv8bf16.nxv8bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -220,7 +220,7 @@ define @intrinsic_vfmul_vv_nxv16bf16_nxv16bf16_nxv16bf16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv16bf16.nxv16bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -270,7 +270,7 @@ define @intrinsic_vfmul_vv_nxv32bf16_nxv32bf16_nxv32bf16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv32bf16.nxv32bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -321,7 +321,7 @@ define @intrinsic_vfmul_vf_nxv1bf16_nxv1bf16_bf16( @llvm.riscv.vfmul.nxv1bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -371,7 +371,7 @@ define @intrinsic_vfmul_vf_nxv2bf16_nxv2bf16_bf16( @llvm.riscv.vfmul.nxv2bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -421,7 +421,7 @@ define @intrinsic_vfmul_vf_nxv4bf16_nxv4bf16_bf16( @llvm.riscv.vfmul.nxv4bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -471,7 +471,7 @@ define @intrinsic_vfmul_vf_nxv8bf16_nxv8bf16_bf16( @llvm.riscv.vfmul.nxv8bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -521,7 +521,7 @@ define @intrinsic_vfmul_vf_nxv16bf16_nxv16bf16_bf16( @llvm.riscv.vfmul.nxv16bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -571,7 +571,7 @@ define @intrinsic_vfmul_vf_nxv32bf16_nxv32bf16_bf16( @llvm.riscv.vfmul.nxv32bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll index ceefe6b3aa83b..f3293ddc83ef9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll @@ -17,7 +17,7 @@ define @intrinsic_vfmv.v.f_f_nxv1bf16(bfloat %0, iXLen %1) ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv1bf16( - undef, + poison, bfloat %0, iXLen %1) @@ -37,7 +37,7 @@ define @intrinsic_vfmv.v.f_f_nxv2bf16(bfloat %0, iXLen %1) ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv2bf16( - undef, + poison, bfloat %0, iXLen %1) @@ -57,7 +57,7 @@ define @intrinsic_vfmv.v.f_f_nxv4bf16(bfloat %0, iXLen %1) ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv4bf16( - undef, + poison, bfloat %0, iXLen %1) @@ -77,7 +77,7 @@ define @intrinsic_vfmv.v.f_f_nxv8bf16(bfloat %0, iXLen %1) ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv8bf16( - undef, + poison, bfloat %0, iXLen %1) @@ -97,7 +97,7 @@ define @intrinsic_vfmv.v.f_f_nxv16bf16(bfloat %0, iXLen % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv16bf16( - undef, + poison, bfloat %0, iXLen %1) @@ -117,7 +117,7 @@ define @intrinsic_vfmv.v.f_f_nxv32bf16(bfloat %0, iXLen % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv32bf16( - undef, + poison, bfloat %0, iXLen %1) @@ -132,7 +132,7 @@ define @intrinsic_vfmv.v.f_zero_nxv1bf16(iXLen %0) nounwin ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv1bf16( - undef, + poison, bfloat 0.0, iXLen %0) @@ -147,7 +147,7 @@ define @intrinsic_vmv.v.i_zero_nxv2bf16(iXLen %0) nounwind ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv2bf16( - undef, + poison, bfloat 0.0, iXLen %0) @@ -162,7 +162,7 @@ define @intrinsic_vmv.v.i_zero_nxv4bf16(iXLen %0) nounwind ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv4bf16( - undef, + poison, bfloat 0.0, iXLen %0) @@ -177,7 +177,7 @@ define @intrinsic_vmv.v.i_zero_nxv8bf16(iXLen %0) nounwind ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv8bf16( - undef, + poison, bfloat 0.0, iXLen %0) @@ -192,7 +192,7 @@ define @intrinsic_vmv.v.i_zero_nxv16bf16(iXLen %0) nounwi ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfmv.v.f.nxv16bf16( - undef, + poison, bfloat 0.0, iXLen %0) @@ -207,7 +207,7 @@ define @intrinsic_vmv.v.i_zero_nxv32bf16(iXLen %0) nounwi ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmv.v.f.nxv32bf16( - undef, + poison, bfloat 0.0, iXLen %0) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll index 0864e03d61f86..7d587fd55cd83 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll @@ -18,7 +18,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv1bf16_nxv1f32( @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32( - undef, + poison, %0, iXLen %1) @@ -62,7 +62,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv2bf16_nxv2f32( @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32( - undef, + poison, %0, iXLen %1) @@ -106,7 +106,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv4bf16_nxv4f32( @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32( - undef, + poison, %0, iXLen %1) @@ -150,7 +150,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv8bf16_nxv8f32( @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32( - undef, + poison, %0, iXLen %1) @@ -194,7 +194,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv16bf16_nxv16f32( @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32( - undef, + poison, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll index 40e652948d1a5..ee9e3d1b9f630 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll @@ -18,7 +18,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1bf16( @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16( - undef, + poison, %0, iXLen %1) @@ -62,7 +62,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2bf16( @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16( - undef, + poison, %0, iXLen %1) @@ -106,7 +106,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4bf16( @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16( - undef, + poison, %0, iXLen %1) @@ -150,7 
+150,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8bf16( @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16( - undef, + poison, %0, iXLen %1) @@ -194,7 +194,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16bf16( @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16( - undef, + poison, %0, iXLen %1) @@ -238,7 +238,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32bf16( @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16( - undef, + poison, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll index 1834f183e96d6..521f7274dc5c9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll @@ -18,7 +18,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1bf16( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16( - undef, + poison, %0, iXLen %1) @@ -62,7 +62,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2bf16( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16( - undef, + poison, %0, iXLen %1) @@ -106,7 +106,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4bf16( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16( - undef, + poison, %0, iXLen %1) @@ -150,7 +150,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8bf16( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16( - undef, + poison, %0, iXLen %1) @@ -194,7 +194,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16bf16( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16( - undef, + poison, %0, iXLen %1) @@ -238,7 +238,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32bf16( @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16( - undef, + poison, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll index fe2120ac612a8..ab9ebade287e6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll @@ -20,7 +20,7 @@ define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1bf16( @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16( - undef, + 
poison, %0, iXLen 0, iXLen %1) @@ -67,7 +67,7 @@ define @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2bf16( @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16( - undef, + poison, %0, iXLen 0, iXLen %1) @@ -114,7 +114,7 @@ define @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4bf16( @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16( - undef, + poison, %0, iXLen 0, iXLen %1) @@ -161,7 +161,7 @@ define @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8bf16( @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16( - undef, + poison, %0, iXLen 0, iXLen %1) @@ -208,7 +208,7 @@ define @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16bf16( @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16( - undef, + poison, %0, iXLen 0, iXLen %1) @@ -255,7 +255,7 @@ define @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32bf16( @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16( - undef, + poison, %0, iXLen 0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll index 95ba5e94a9cd4..61c6803ce12bd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll @@ -20,7 +20,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1bf16( @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16( - undef, + poison, %0, iXLen 0, iXLen %1) @@ -67,7 +67,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2bf16( @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16( - undef, + poison, %0, iXLen 0, iXLen %1) @@ -114,7 +114,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4bf16( @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16( - undef, + poison, %0, iXLen 0, iXLen %1) @@ -161,7 +161,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8bf16( @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16( - undef, + poison, %0, iXLen 0, iXLen %1) @@ -208,7 +208,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16bf16( @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16( - undef, + poison, %0, iXLen 0, iXLen %1) @@ -255,7 +255,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32bf16( @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16( - undef, + poison, %0, iXLen 0, iXLen %1) diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll index 23de020938f73..1211415ffe432 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll @@ -19,7 +19,7 @@ define @intrinsic_vfrec7_v_nxv1bf16_nxv1bf16( @llvm.riscv.vfrec7.nxv1bf16( - undef, + poison, %0, iXLen 0, iXLen %1) @@ -65,7 +65,7 @@ define @intrinsic_vfrec7_v_nxv2bf16_nxv2bf16( @llvm.riscv.vfrec7.nxv2bf16( - undef, + poison, %0, iXLen 0, iXLen %1) @@ -111,7 +111,7 @@ define @intrinsic_vfrec7_v_nxv4bf16_nxv4bf16( @llvm.riscv.vfrec7.nxv4bf16( - undef, + poison, %0, iXLen 0, iXLen %1) @@ -157,7 +157,7 @@ define @intrinsic_vfrec7_v_nxv8bf16_nxv8bf16( @llvm.riscv.vfrec7.nxv8bf16( - undef, + poison, %0, iXLen 0, iXLen %1) @@ -203,7 +203,7 @@ define @intrinsic_vfrec7_v_nxv16bf16_nxv16bf16( @llvm.riscv.vfrec7.nxv16bf16( - undef, + poison, %0, iXLen 0, iXLen %1) @@ -249,7 +249,7 @@ define @intrinsic_vfrec7_v_nxv32bf16_nxv32bf16( @llvm.riscv.vfrec7.nxv32bf16( - undef, + poison, %0, iXLen 0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll index c08363a0d7bf5..4626b865ab454 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll @@ -17,7 +17,7 @@ define @intrinsic_vfrsqrt7_v_nxv1bf16_nxv1bf16( @llvm.riscv.vfrsqrt7.nxv1bf16( - undef, + poison, %0, iXLen %1) @@ -60,7 +60,7 @@ define @intrinsic_vfrsqrt7_v_nxv2bf16_nxv2bf16( @llvm.riscv.vfrsqrt7.nxv2bf16( - undef, + poison, %0, iXLen %1) @@ -103,7 +103,7 @@ define @intrinsic_vfrsqrt7_v_nxv4bf16_nxv4bf16( @llvm.riscv.vfrsqrt7.nxv4bf16( - undef, + poison, %0, iXLen %1) @@ -146,7 +146,7 @@ define @intrinsic_vfrsqrt7_v_nxv8bf16_nxv8bf16( @llvm.riscv.vfrsqrt7.nxv8bf16( - undef, + poison, %0, iXLen %1) @@ -189,7 +189,7 @@ define @intrinsic_vfrsqrt7_v_nxv16bf16_nxv16bf16( @llvm.riscv.vfrsqrt7.nxv16bf16( - undef, + poison, %0, iXLen %1) @@ -232,7 +232,7 @@ define 
@intrinsic_vfrsqrt7_v_nxv32bf16_nxv32bf16( @llvm.riscv.vfrsqrt7.nxv32bf16( - undef, + poison, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll index 8ca926ac04b19..54a6d48cfcc5b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll @@ -18,7 +18,7 @@ define @intrinsic_vfrsub_vf_nxv1bf16_nxv1bf16_bf16( @llvm.riscv.vfrsub.nxv1bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 7, iXLen %2) @@ -64,7 +64,7 @@ define @intrinsic_vfrsub_vf_nxv2bf16_nxv2bf16_bf16( @llvm.riscv.vfrsub.nxv2bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 7, iXLen %2) @@ -110,7 +110,7 @@ define @intrinsic_vfrsub_vf_nxv4bf16_nxv4bf16_bf16( @llvm.riscv.vfrsub.nxv4bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 7, iXLen %2) @@ -156,7 +156,7 @@ define @intrinsic_vfrsub_vf_nxv8bf16_nxv8bf16_bf16( @llvm.riscv.vfrsub.nxv8bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 7, iXLen %2) @@ -202,7 +202,7 @@ define @intrinsic_vfrsub_vf_nxv16bf16_nxv16bf16_bf16( @llvm.riscv.vfrsub.nxv16bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 7, iXLen %2) @@ -248,7 +248,7 @@ define @intrinsic_vfrsub_vf_nxv32bf16_nxv32bf16_bf16( @llvm.riscv.vfrsub.nxv32bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 7, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll index 7cd3b117f0cc9..2cd698d9aaa3c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll @@ -18,7 +18,7 @@ define @intrinsic_vfsgnj_vv_nxv1bf16_nxv1bf16_nxv1bf16( @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16( - undef, + poison, %0, %1, iXLen %2) @@ -65,7 +65,7 @@ define @intrinsic_vfsgnj_vv_nxv2bf16_nxv2bf16_nxv2bf16( @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16( - undef, + poison, %0, %1, iXLen %2) @@ -112,7 +112,7 @@ define @intrinsic_vfsgnj_vv_nxv4bf16_nxv4bf16_nxv4bf16( @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16( - undef, + poison, %0, %1, iXLen %2) @@ -159,7 +159,7 @@ 
define @intrinsic_vfsgnj_vv_nxv8bf16_nxv8bf16_nxv8bf16( @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16( - undef, + poison, %0, %1, iXLen %2) @@ -206,7 +206,7 @@ define @intrinsic_vfsgnj_vv_nxv16bf16_nxv16bf16_nxv16bf16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16( - undef, + poison, %0, %1, iXLen %2) @@ -253,7 +253,7 @@ define @intrinsic_vfsgnj_vv_nxv32bf16_nxv32bf16_nxv32bf16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16( - undef, + poison, %0, %1, iXLen %2) @@ -301,7 +301,7 @@ define @intrinsic_vfsgnj_vf_nxv1bf16_nxv1bf16_bf16( @llvm.riscv.vfsgnj.nxv1bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -348,7 +348,7 @@ define @intrinsic_vfsgnj_vf_nxv2bf16_nxv2bf16_bf16( @llvm.riscv.vfsgnj.nxv2bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -395,7 +395,7 @@ define @intrinsic_vfsgnj_vf_nxv4bf16_nxv4bf16_bf16( @llvm.riscv.vfsgnj.nxv4bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -442,7 +442,7 @@ define @intrinsic_vfsgnj_vf_nxv8bf16_nxv8bf16_bf16( @llvm.riscv.vfsgnj.nxv8bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -489,7 +489,7 @@ define @intrinsic_vfsgnj_vf_nxv16bf16_nxv16bf16_bf16( @llvm.riscv.vfsgnj.nxv16bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -536,7 +536,7 @@ define @intrinsic_vfsgnj_vf_nxv32bf16_nxv32bf16_bf16( @llvm.riscv.vfsgnj.nxv32bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll index ea63a7d737086..08340becc9ed4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll @@ -18,7 +18,7 @@ define @intrinsic_vfsgnjn_vv_nxv1bf16_nxv1bf16_nxv1bf16( @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16( - undef, + poison, %0, %1, iXLen %2) @@ -65,7 +65,7 @@ define @intrinsic_vfsgnjn_vv_nxv2bf16_nxv2bf16_nxv2bf16( @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16( - undef, + poison, %0, %1, iXLen %2) @@ -112,7 +112,7 @@ define 
@intrinsic_vfsgnjn_vv_nxv4bf16_nxv4bf16_nxv4bf16( @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16( - undef, + poison, %0, %1, iXLen %2) @@ -159,7 +159,7 @@ define @intrinsic_vfsgnjn_vv_nxv8bf16_nxv8bf16_nxv8bf16( @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16( - undef, + poison, %0, %1, iXLen %2) @@ -206,7 +206,7 @@ define @intrinsic_vfsgnjn_vv_nxv16bf16_nxv16bf16_nxv16bf1 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16( - undef, + poison, %0, %1, iXLen %2) @@ -253,7 +253,7 @@ define @intrinsic_vfsgnjn_vv_nxv32bf16_nxv32bf16_nxv32bf1 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16( - undef, + poison, %0, %1, iXLen %2) @@ -301,7 +301,7 @@ define @intrinsic_vfsgnjn_vf_nxv1bf16_nxv1bf16_bf16( @llvm.riscv.vfsgnjn.nxv1bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -348,7 +348,7 @@ define @intrinsic_vfsgnjn_vf_nxv2bf16_nxv2bf16_bf16( @llvm.riscv.vfsgnjn.nxv2bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -395,7 +395,7 @@ define @intrinsic_vfsgnjn_vf_nxv4bf16_nxv4bf16_bf16( @llvm.riscv.vfsgnjn.nxv4bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -442,7 +442,7 @@ define @intrinsic_vfsgnjn_vf_nxv8bf16_nxv8bf16_bf16( @llvm.riscv.vfsgnjn.nxv8bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -489,7 +489,7 @@ define @intrinsic_vfsgnjn_vf_nxv16bf16_nxv16bf16_bf16( @llvm.riscv.vfsgnjn.nxv16bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -536,7 +536,7 @@ define @intrinsic_vfsgnjn_vf_nxv32bf16_nxv32bf16_bf16( @llvm.riscv.vfsgnjn.nxv32bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll index 3ed8d13d8d17d..e51a42e2b8cea 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll @@ -18,7 +18,7 @@ define @intrinsic_vfsgnjx_vv_nxv1bf16_nxv1bf16_nxv1bf16( @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16( - undef, + poison, %0, %1, iXLen %2) @@ -65,7 +65,7 @@ define 
@intrinsic_vfsgnjx_vv_nxv2bf16_nxv2bf16_nxv2bf16( @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16( - undef, + poison, %0, %1, iXLen %2) @@ -112,7 +112,7 @@ define @intrinsic_vfsgnjx_vv_nxv4bf16_nxv4bf16_nxv4bf16( @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16( - undef, + poison, %0, %1, iXLen %2) @@ -159,7 +159,7 @@ define @intrinsic_vfsgnjx_vv_nxv8bf16_nxv8bf16_nxv8bf16( @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16( - undef, + poison, %0, %1, iXLen %2) @@ -206,7 +206,7 @@ define @intrinsic_vfsgnjx_vv_nxv16bf16_nxv16bf16_nxv16bf1 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16( - undef, + poison, %0, %1, iXLen %2) @@ -253,7 +253,7 @@ define @intrinsic_vfsgnjx_vv_nxv32bf16_nxv32bf16_nxv32bf1 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16( - undef, + poison, %0, %1, iXLen %2) @@ -301,7 +301,7 @@ define @intrinsic_vfsgnjx_vf_nxv1bf16_nxv1bf16_bf16( @llvm.riscv.vfsgnjx.nxv1bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -348,7 +348,7 @@ define @intrinsic_vfsgnjx_vf_nxv2bf16_nxv2bf16_bf16( @llvm.riscv.vfsgnjx.nxv2bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -395,7 +395,7 @@ define @intrinsic_vfsgnjx_vf_nxv4bf16_nxv4bf16_bf16( @llvm.riscv.vfsgnjx.nxv4bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -442,7 +442,7 @@ define @intrinsic_vfsgnjx_vf_nxv8bf16_nxv8bf16_bf16( @llvm.riscv.vfsgnjx.nxv8bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -489,7 +489,7 @@ define @intrinsic_vfsgnjx_vf_nxv16bf16_nxv16bf16_bf16( @llvm.riscv.vfsgnjx.nxv16bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -536,7 +536,7 @@ define @intrinsic_vfsgnjx_vf_nxv32bf16_nxv32bf16_bf16( @llvm.riscv.vfsgnjx.nxv32bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll index aa4efcba59753..c65719c3a4c1a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll @@ -18,7 
+18,7 @@ define @intrinsic_vfslide1down_vf_nxv1bf16_nxv1bf16_bf16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv1bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -65,7 +65,7 @@ define @intrinsic_vfslide1down_vf_nxv2bf16_nxv2bf16_bf16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv2bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -112,7 +112,7 @@ define @intrinsic_vfslide1down_vf_nxv4bf16_nxv4bf16_bf16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv4bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -159,7 +159,7 @@ define @intrinsic_vfslide1down_vf_nxv8bf16_nxv8bf16_bf16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv8bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -206,7 +206,7 @@ define @intrinsic_vfslide1down_vf_nxv16bf16_nxv16bf16_bf1 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv16bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -253,7 +253,7 @@ define @intrinsic_vfslide1down_vf_nxv32bf16_nxv32bf16_bf1 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv32bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll index baf9f5a15b08b..57a48986fdfcd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll @@ -19,7 +19,7 @@ define @intrinsic_vfslide1up_vf_nxv1bf16_nxv1bf16_bf16( @llvm.riscv.vfslide1up.nxv1bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -67,7 +67,7 @@ define @intrinsic_vfslide1up_vf_nxv2bf16_nxv2bf16_bf16( @llvm.riscv.vfslide1up.nxv2bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -115,7 +115,7 @@ define @intrinsic_vfslide1up_vf_nxv4bf16_nxv4bf16_bf16( @llvm.riscv.vfslide1up.nxv4bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -163,7 +163,7 @@ define @intrinsic_vfslide1up_vf_nxv8bf16_nxv8bf16_bf16( 
@llvm.riscv.vfslide1up.nxv8bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -211,7 +211,7 @@ define @intrinsic_vfslide1up_vf_nxv16bf16_nxv16bf16_bf16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv16bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) @@ -259,7 +259,7 @@ define @intrinsic_vfslide1up_vf_nxv32bf16_nxv32bf16_bf16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv32bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll index 2afb375149e49..aea75211b70b5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll @@ -18,7 +18,7 @@ define @intrinsic_vfsub_vv_nxv1bf16_nxv1bf16_nxv1bf16( @llvm.riscv.vfsub.nxv1bf16.nxv1bf16( - undef, + poison, %0, %1, iXLen 7, iXLen %2) @@ -64,7 +64,7 @@ define @intrinsic_vfsub_vv_nxv2bf16_nxv2bf16_nxv2bf16( @llvm.riscv.vfsub.nxv2bf16.nxv2bf16( - undef, + poison, %0, %1, iXLen 7, iXLen %2) @@ -110,7 +110,7 @@ define @intrinsic_vfsub_vv_nxv4bf16_nxv4bf16_nxv4bf16( @llvm.riscv.vfsub.nxv4bf16.nxv4bf16( - undef, + poison, %0, %1, iXLen 7, iXLen %2) @@ -156,7 +156,7 @@ define @intrinsic_vfsub_vv_nxv8bf16_nxv8bf16_nxv8bf16( @llvm.riscv.vfsub.nxv8bf16.nxv8bf16( - undef, + poison, %0, %1, iXLen 7, iXLen %2) @@ -202,7 +202,7 @@ define @intrinsic_vfsub_vv_nxv16bf16_nxv16bf16_nxv16bf16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv16bf16.nxv16bf16( - undef, + poison, %0, %1, iXLen 7, iXLen %2) @@ -248,7 +248,7 @@ define @intrinsic_vfsub_vv_nxv32bf16_nxv32bf16_nxv32bf16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv32bf16.nxv32bf16( - undef, + poison, %0, %1, iXLen 7, iXLen %2) @@ -295,7 +295,7 @@ define @intrinsic_vfsub_vf_nxv1bf16_nxv1bf16_bf16( @llvm.riscv.vfsub.nxv1bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 7, iXLen %2) @@ -341,7 +341,7 @@ define @intrinsic_vfsub_vf_nxv2bf16_nxv2bf16_bf16( @llvm.riscv.vfsub.nxv2bf16.bf16( - undef, + 
poison, %0, bfloat %1, iXLen 7, iXLen %2) @@ -387,7 +387,7 @@ define @intrinsic_vfsub_vf_nxv4bf16_nxv4bf16_bf16( @llvm.riscv.vfsub.nxv4bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 7, iXLen %2) @@ -433,7 +433,7 @@ define @intrinsic_vfsub_vf_nxv8bf16_nxv8bf16_bf16( @llvm.riscv.vfsub.nxv8bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 7, iXLen %2) @@ -479,7 +479,7 @@ define @intrinsic_vfsub_vf_nxv16bf16_nxv16bf16_bf16( @llvm.riscv.vfsub.nxv16bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 7, iXLen %2) @@ -525,7 +525,7 @@ define @intrinsic_vfsub_vf_nxv32bf16_nxv32bf16_bf16( @llvm.riscv.vfsub.nxv32bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 7, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll index bdffe3b2d61dc..62feac824efad 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll @@ -21,7 +21,7 @@ define @intrinsic_vfwadd_vv_nxv1f32_nxv1bf16_nxv1bf16( @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -72,7 +72,7 @@ define @intrinsic_vfwadd_vv_nxv2f32_nxv2bf16_nxv2bf16( @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -124,7 +124,7 @@ define @intrinsic_vfwadd_vv_nxv4f32_nxv4bf16_nxv4bf16( @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -176,7 +176,7 @@ define @intrinsic_vfwadd_vv_nxv8f32_nxv8bf16_nxv8bf16( @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -228,7 +228,7 @@ define @intrinsic_vfwadd_vv_nxv16f32_nxv16bf16_nxv16bf16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -279,7 +279,7 @@ define @intrinsic_vfwadd_vf_nxv1f32_nxv1bf16_bf16( @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -330,7 +330,7 @@ define 
@intrinsic_vfwadd_vf_nxv2f32_nxv2bf16_bf16( @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -381,7 +381,7 @@ define @intrinsic_vfwadd_vf_nxv4f32_nxv4bf16_bf16( @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -432,7 +432,7 @@ define @intrinsic_vfwadd_vf_nxv8f32_nxv8bf16_bf16( @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -483,7 +483,7 @@ define @intrinsic_vfwadd_vf_nxv16f32_nxv16bf16_bf16( @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll index dd345d0b0b786..69c9a4ea75c97 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll @@ -20,7 +20,7 @@ define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1bf16( @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -70,7 +70,7 @@ define @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2bf16( @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -120,7 +120,7 @@ define @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4bf16( @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -170,7 +170,7 @@ define @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8bf16( @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -220,7 +220,7 @@ define @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16bf16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -271,7 +271,7 @@ define @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_bf16( @llvm.riscv.vfwadd.w.nxv1f32.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -321,7 +321,7 @@ define @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_bf16( @llvm.riscv.vfwadd.w.nxv2f32.bf16( - undef, 
+ poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -371,7 +371,7 @@ define @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_bf16( @llvm.riscv.vfwadd.w.nxv4f32.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -421,7 +421,7 @@ define @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_bf16( @llvm.riscv.vfwadd.w.nxv8f32.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -471,7 +471,7 @@ define @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_bf16( @llvm.riscv.vfwadd.w.nxv16f32.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -706,7 +706,7 @@ define @intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1bf1 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16( - undef, + poison, %1, %0, iXLen 0, iXLen %2) @@ -725,7 +725,7 @@ define @intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2bf1 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16( - undef, + poison, %1, %0, iXLen 0, iXLen %2) @@ -744,7 +744,7 @@ define @intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4bf1 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16( - undef, + poison, %1, %0, iXLen 0, iXLen %2) @@ -763,7 +763,7 @@ define @intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8bf1 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16( - undef, + poison, %1, %0, iXLen 0, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll index ef7e695b292e9..b7df45bad36e6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll @@ -18,7 +18,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv1bf16_nxv1i8( @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8( - undef, + poison, %0, iXLen %1) @@ -61,7 +61,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv2bf16_nxv2i8( @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8( - undef, + poison, %0, iXLen %1) @@ -104,7 +104,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv4bf16_nxv4i8( @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8( - undef, + poison, %0, iXLen %1) @@ 
-147,7 +147,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv8bf16_nxv8i8( @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8( - undef, + poison, %0, iXLen %1) @@ -190,7 +190,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv16bf16_nxv16i8( @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8( - undef, + poison, %0, iXLen %1) @@ -233,7 +233,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv32bf16_nxv32i8( @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8( - undef, + poison, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll index 174bf7a710ea6..c370261a77bc0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll @@ -18,7 +18,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv1bf16_nxv1i8( @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8( - undef, + poison, %0, iXLen %1) @@ -61,7 +61,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv2bf16_nxv2i8( @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8( - undef, + poison, %0, iXLen %1) @@ -104,7 +104,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv4bf16_nxv4i8( @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8( - undef, + poison, %0, iXLen %1) @@ -147,7 +147,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv8bf16_nxv8i8( @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8( - undef, + poison, %0, iXLen %1) @@ -190,7 +190,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv16bf16_nxv16i8( @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8( - undef, + poison, %0, iXLen %1) @@ -233,7 +233,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv32bf16_nxv32i8( @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8( - undef, + poison, %0, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll index 7e4814e998c7e..577b93af7a918 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll @@ -21,7 +21,7 @@ define @intrinsic_vfwmul_vv_nxv1f32_nxv1bf16_nxv1bf16( @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -72,7 +72,7 @@ define 
@intrinsic_vfwmul_vv_nxv2f32_nxv2bf16_nxv2bf16( @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -124,7 +124,7 @@ define @intrinsic_vfwmul_vv_nxv4f32_nxv4bf16_nxv4bf16( @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -176,7 +176,7 @@ define @intrinsic_vfwmul_vv_nxv8f32_nxv8bf16_nxv8bf16( @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -228,7 +228,7 @@ define @intrinsic_vfwmul_vv_nxv16f32_nxv16bf16_nxv16bf16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -279,7 +279,7 @@ define @intrinsic_vfwmul_vf_nxv1f32_nxv1bf16_bf16( @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -330,7 +330,7 @@ define @intrinsic_vfwmul_vf_nxv2f32_nxv2bf16_bf16( @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -381,7 +381,7 @@ define @intrinsic_vfwmul_vf_nxv4f32_nxv4bf16_bf16( @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -432,7 +432,7 @@ define @intrinsic_vfwmul_vf_nxv8f32_nxv8bf16_bf16( @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -483,7 +483,7 @@ define @intrinsic_vfwmul_vf_nxv16f32_nxv16bf16_bf16( @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll index 4c509faab1bab..d993e4e610d2c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll @@ -21,7 +21,7 @@ define @intrinsic_vfwsub_vv_nxv1f32_nxv1bf16_nxv1bf16( @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -72,7 +72,7 @@ define @intrinsic_vfwsub_vv_nxv2f32_nxv2bf16_nxv2bf16( 
@llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -124,7 +124,7 @@ define @intrinsic_vfwsub_vv_nxv4f32_nxv4bf16_nxv4bf16( @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -176,7 +176,7 @@ define @intrinsic_vfwsub_vv_nxv8f32_nxv8bf16_nxv8bf16( @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -228,7 +228,7 @@ define @intrinsic_vfwsub_vv_nxv16f32_nxv16bf16_nxv16bf16(< ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -279,7 +279,7 @@ define @intrinsic_vfwsub_vf_nxv1f32_nxv1bf16_bf16( @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -330,7 +330,7 @@ define @intrinsic_vfwsub_vf_nxv2f32_nxv2bf16_bf16( @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -381,7 +381,7 @@ define @intrinsic_vfwsub_vf_nxv4f32_nxv4bf16_bf16( @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -432,7 +432,7 @@ define @intrinsic_vfwsub_vf_nxv8f32_nxv8bf16_bf16( @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -483,7 +483,7 @@ define @intrinsic_vfwsub_vf_nxv16f32_nxv16bf16_bf16( @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll index f86ebf1aaf99e..11066d9487684 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll @@ -20,7 +20,7 @@ define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1bf16( @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -70,7 +70,7 @@ define @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2bf16( @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16( - undef, + 
poison, %0, %1, iXLen 0, iXLen %2) @@ -120,7 +120,7 @@ define @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4bf16( @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -170,7 +170,7 @@ define @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8bf16( @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -220,7 +220,7 @@ define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16bf16( ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16( - undef, + poison, %0, %1, iXLen 0, iXLen %2) @@ -271,7 +271,7 @@ define @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_bf16( @llvm.riscv.vfwsub.w.nxv1f32.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -321,7 +321,7 @@ define @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_bf16( @llvm.riscv.vfwsub.w.nxv2f32.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -371,7 +371,7 @@ define @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_bf16( @llvm.riscv.vfwsub.w.nxv4f32.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -421,7 +421,7 @@ define @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_bf16( @llvm.riscv.vfwsub.w.nxv8f32.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -471,7 +471,7 @@ define @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_bf16( @llvm.riscv.vfwsub.w.nxv16f32.bf16( - undef, + poison, %0, bfloat %1, iXLen 0, iXLen %2) @@ -706,7 +706,7 @@ define @intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1bf1 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16( - undef, + poison, %1, %0, iXLen 0, iXLen %2) @@ -725,7 +725,7 @@ define @intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2bf1 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16( - undef, + poison, %1, %0, iXLen 0, iXLen %2) @@ -744,7 +744,7 @@ define @intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4bf1 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16( - undef, + poison, %1, %0, iXLen 0, iXLen %2) @@ -763,7 +763,7 @@ define 
@intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8bf1 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16( - undef, + poison, %1, %0, iXLen 0, iXLen %2) From 9a15c8720278b1ccd561aa4c7fb19aceb9f73660 Mon Sep 17 00:00:00 2001 From: Brandon Wu Date: Mon, 29 Sep 2025 17:59:14 +0800 Subject: [PATCH 04/11] fixup! [RISCV][llvm] Support Zvfbfa codegen and vsetvli insertion --- llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 3 ++- llvm/lib/Target/RISCV/RISCVSubtarget.h | 8 ++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp index 40b5e27e32384..cd5e9b4c9bc3f 100644 --- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp @@ -671,7 +671,8 @@ class VSETVLIInfo { unsigned encodeVTYPE() const { assert(isValid() && !isUnknown() && !SEWLMULRatioOnly && "Can't encode VTYPE for uninitialized or unknown"); - return RISCVVType::encodeVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic, AltFmt); + return RISCVVType::encodeVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic, + AltFmt); } bool hasSEWLMULRatioOnly() const { return SEWLMULRatioOnly; } diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h index da666ca551fa5..2e3d46e7bd013 100644 --- a/llvm/lib/Target/RISCV/RISCVSubtarget.h +++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h @@ -288,10 +288,14 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo { bool hasVInstructionsI64() const { return HasStdExtZve64x; } bool hasVInstructionsF16Minimal() const { return HasStdExtZvfhmin; } bool hasVInstructionsF16() const { return HasStdExtZvfh; } - bool hasVInstructionsBF16Minimal() const { return HasStdExtZvfbfmin || HasStdExtZvfbfa; } + bool hasVInstructionsBF16Minimal() const { + return HasStdExtZvfbfmin || HasStdExtZvfbfa; + } bool hasVInstructionsF32() const { return HasStdExtZve32f; } bool hasVInstructionsF64() const { return 
HasStdExtZve64d; } - bool hasVInstructionsBF16() const { return HasStdExtZvfbfmin || HasStdExtZvfbfa; } + bool hasVInstructionsBF16() const { + return HasStdExtZvfbfmin || HasStdExtZvfbfa; + } // F16 and F64 both require F32. bool hasVInstructionsAnyF() const { return hasVInstructionsF32(); } bool hasVInstructionsFullMultiply() const { return HasStdExtV; } From 54b5e853d15be6c39759ae18660cd9810b08fd3f Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Tue, 30 Sep 2025 19:51:25 -0700 Subject: [PATCH 05/11] fixup! Address review comments --- llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 14 +++++++------- llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td | 2 +- llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td | 13 ++++++------- llvm/lib/Target/RISCV/RISCVSubtarget.h | 4 +--- 4 files changed, 15 insertions(+), 18 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp index cd5e9b4c9bc3f..7cc1bba1bb78c 100644 --- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp @@ -164,7 +164,7 @@ struct DemandedFields { // If this is true, we demand that VTYPE is set to some legal state, i.e. that // vill is unset. 
bool VILL = false; - bool UseAltFmt = false; + bool AltFmt = false; // Return true if any part of VTYPE was used bool usedVTYPE() const { @@ -184,7 +184,7 @@ struct DemandedFields { TailPolicy = true; MaskPolicy = true; VILL = true; - UseAltFmt = true; + AltFmt = true; } // Mark all VL properties as demanded @@ -210,7 +210,7 @@ struct DemandedFields { TailPolicy |= B.TailPolicy; MaskPolicy |= B.MaskPolicy; VILL |= B.VILL; - UseAltFmt |= B.UseAltFmt; + AltFmt |= B.AltFmt; } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) @@ -327,7 +327,7 @@ static bool areCompatibleVTYPEs(uint64_t CurVType, uint64_t NewVType, if (Used.MaskPolicy && RISCVVType::isMaskAgnostic(CurVType) != RISCVVType::isMaskAgnostic(NewVType)) return false; - if (Used.UseAltFmt == true && + if (Used.AltFmt == true && RISCVVType::isAltFmt(CurVType) != RISCVVType::isAltFmt(NewVType)) return false; return true; @@ -481,8 +481,8 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) { Res.TailPolicy = false; } - Res.UseAltFmt = RISCVII::getAltFmtType(MI.getDesc().TSFlags) != - RISCVII::AltFmtType::DontCare; + Res.AltFmt = RISCVII::getAltFmtType(MI.getDesc().TSFlags) != + RISCVII::AltFmtType::DontCare; return Res; } @@ -1254,7 +1254,7 @@ void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info, IncomingInfo.getTailAgnostic(), (Demanded.MaskPolicy ? IncomingInfo : Info).getMaskAgnostic() || IncomingInfo.getMaskAgnostic(), - Demanded.UseAltFmt ? IncomingInfo.getAltFmt() : 0); + Demanded.AltFmt ? IncomingInfo.getAltFmt() : 0); // If we only knew the sew/lmul ratio previously, replace the VTYPE but keep // the AVL. 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td index ff7ed628112cb..6591242c6b857 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -368,7 +368,7 @@ defset list AllVectors = { def VF16M4: GroupVTypeInfo; def VF16M8: GroupVTypeInfo; + V_M8, f16, FPR16>; def VF32M2: GroupVTypeInfo; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td index 29a4c46e212ad..d5867daec548d 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td @@ -44,7 +44,7 @@ let Predicates = [HasStdExtZvfbfminOrZvfofp8min] in { let mayRaiseFPException = true, Predicates = [HasStdExtZvfbfwma] in defm PseudoVFWMACCBF16 : VPseudoVWMAC_VV_VF_BF_RM; -defset list AllWidenableIntToBFloatVectors = { +defset list AllWidenableIntToBF16Vectors = { def : VTypeInfoToWide; def : VTypeInfoToWide; def : VTypeInfoToWide; @@ -364,10 +364,9 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in { Sched<[WriteVMovFS, ReadVMovFS]>; let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F, isReMaterializable = 1, Constraints = "$rd = $passthru" in - def "PseudoVFMV_S_" # f.FX # "_ALT": + def "PseudoVFMV_S_" # f.FX # "_ALT" : RISCVVPseudo<(outs VR:$rd), - (ins VR:$passthru, f.fprclass:$rs1, AVL:$vl, sew:$sew), - []>, + (ins VR:$passthru, f.fprclass:$rs1, AVL:$vl, sew:$sew)>, Sched<[WriteVMovSF, ReadVMovSF_V, ReadVMovSF_F]>; } @@ -566,7 +565,7 @@ multiclass VPatConversionVI_VF_BF16 { multiclass VPatConversionWF_VI_BF16 { - foreach vtiToWti = AllWidenableIntToBFloatVectors in { + foreach vtiToWti = AllWidenableIntToBF16Vectors in { defvar vti = vtiToWti.Vti; defvar fwti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, @@ -591,7 +590,7 @@ multiclass VPatConversionWF_VF_BF16 { - foreach vtiToWti = AllWidenableIntToBFloatVectors in { + foreach vtiToWti = 
AllWidenableIntToBF16Vectors in { defvar vti = vtiToWti.Vti; defvar fwti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, @@ -603,7 +602,7 @@ multiclass VPatConversionVI_WF_BF16 { } multiclass VPatConversionVI_WF_RM_BF16 { - foreach vtiToWti = AllWidenableIntToBFloatVectors in { + foreach vtiToWti = AllWidenableIntToBF16Vectors in { defvar vti = vtiToWti.Vti; defvar fwti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h index 2e3d46e7bd013..a58d138f3e230 100644 --- a/llvm/lib/Target/RISCV/RISCVSubtarget.h +++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h @@ -293,9 +293,7 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo { } bool hasVInstructionsF32() const { return HasStdExtZve32f; } bool hasVInstructionsF64() const { return HasStdExtZve64d; } - bool hasVInstructionsBF16() const { - return HasStdExtZvfbfmin || HasStdExtZvfbfa; - } + bool hasVInstructionsBF16() const { return HasStdExtZvfbfa; } // F16 and F64 both require F32. bool hasVInstructionsAnyF() const { return hasVInstructionsF32(); } bool hasVInstructionsFullMultiply() const { return HasStdExtV; } From 4e6ece85b40dd8d3ffddce90e6ef710bdf8382be Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Tue, 30 Sep 2025 19:51:58 -0700 Subject: [PATCH 06/11] fixup! 
Address review comments --- llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp index 7cc1bba1bb78c..5a90f469165fb 100644 --- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp @@ -327,7 +327,7 @@ static bool areCompatibleVTYPEs(uint64_t CurVType, uint64_t NewVType, if (Used.MaskPolicy && RISCVVType::isMaskAgnostic(CurVType) != RISCVVType::isMaskAgnostic(NewVType)) return false; - if (Used.AltFmt == true && + if (Used.AltFmt && RISCVVType::isAltFmt(CurVType) != RISCVVType::isAltFmt(NewVType)) return false; return true; From 7a533972c25828432c3b5210a314f78ac8ad8443 Mon Sep 17 00:00:00 2001 From: Brandon Wu Date: Wed, 8 Oct 2025 10:28:12 +0800 Subject: [PATCH 07/11] fixup! move c tests to zvfbfa folder --- .../vfadd-bf16.c => zvfbfa/non-policy/non-overloaded/vfadd.c} | 0 .../vfclass-bf16.c => zvfbfa/non-policy/non-overloaded/vfclass.c} | 0 .../vfmacc-bf16.c => zvfbfa/non-policy/non-overloaded/vfmacc.c} | 0 .../vfmadd-bf16.c => zvfbfa/non-policy/non-overloaded/vfmadd.c} | 0 .../vfmax-bf16.c => zvfbfa/non-policy/non-overloaded/vfmax.c} | 0 .../vfmerge-bf16.c => zvfbfa/non-policy/non-overloaded/vfmerge.c} | 0 .../vfmin-bf16.c => zvfbfa/non-policy/non-overloaded/vfmin.c} | 0 .../vfmsac-bf16.c => zvfbfa/non-policy/non-overloaded/vfmsac.c} | 0 .../vfmsub-bf16.c => zvfbfa/non-policy/non-overloaded/vfmsub.c} | 0 .../vfmul-bf16.c => zvfbfa/non-policy/non-overloaded/vfmul.c} | 0 .../vfmv-bf16.c => zvfbfa/non-policy/non-overloaded/vfmv.c} | 0 .../vfncvt-bf16.c => zvfbfa/non-policy/non-overloaded/vfncvt.c} | 0 .../non-policy/non-overloaded/vfncvt_rod.c} | 0 .../non-policy/non-overloaded/vfncvt_rtz.c} | 0 .../vfnmacc-bf16.c => zvfbfa/non-policy/non-overloaded/vfnmacc.c} | 0 .../vfnmadd-bf16.c => zvfbfa/non-policy/non-overloaded/vfnmadd.c} | 0 .../vfnmsac-bf16.c => 
zvfbfa/non-policy/non-overloaded/vfnmsac.c} | 0 .../vfnmsub-bf16.c => zvfbfa/non-policy/non-overloaded/vfnmsub.c} | 0 .../vfrec7-bf16.c => zvfbfa/non-policy/non-overloaded/vfrec7.c} | 0 .../non-policy/non-overloaded/vfrsqrt7.c} | 0 .../vfrsub-bf16.c => zvfbfa/non-policy/non-overloaded/vfrsub.c} | 0 .../vfsgnj-bf16.c => zvfbfa/non-policy/non-overloaded/vfsgnj.c} | 0 .../vfsgnjn-bf16.c => zvfbfa/non-policy/non-overloaded/vfsgnjn.c} | 0 .../vfsgnjx-bf16.c => zvfbfa/non-policy/non-overloaded/vfsgnjx.c} | 0 .../non-policy/non-overloaded/vfslide1down.c} | 0 .../non-policy/non-overloaded/vfslide1up.c} | 0 .../vfsub-bf16.c => zvfbfa/non-policy/non-overloaded/vfsub.c} | 0 .../vfwadd-bf16.c => zvfbfa/non-policy/non-overloaded/vfwadd.c} | 0 .../vfwcvt-bf16.c => zvfbfa/non-policy/non-overloaded/vfwcvt.c} | 0 .../vfwmacc-bf16.c => zvfbfa/non-policy/non-overloaded/vfwmacc.c} | 0 .../vfwmsac-bf16.c => zvfbfa/non-policy/non-overloaded/vfwmsac.c} | 0 .../vfwmul-bf16.c => zvfbfa/non-policy/non-overloaded/vfwmul.c} | 0 .../non-policy/non-overloaded/vfwnmacc.c} | 0 .../non-policy/non-overloaded/vfwnmsac.c} | 0 .../vfwsub-bf16.c => zvfbfa/non-policy/non-overloaded/vfwsub.c} | 0 .../vmfeq-bf16.c => zvfbfa/non-policy/non-overloaded/vmfeq.c} | 0 .../vmfge-bf16.c => zvfbfa/non-policy/non-overloaded/vmfge.c} | 0 .../vmfgt-bf16.c => zvfbfa/non-policy/non-overloaded/vmfgt.c} | 0 .../vmfle-bf16.c => zvfbfa/non-policy/non-overloaded/vmfle.c} | 0 .../vmflt-bf16.c => zvfbfa/non-policy/non-overloaded/vmflt.c} | 0 .../vmfne-bf16.c => zvfbfa/non-policy/non-overloaded/vmfne.c} | 0 .../vfadd-bf16.c => zvfbfa/non-policy/overloaded/vfadd.c} | 0 .../vfclass-bf16.c => zvfbfa/non-policy/overloaded/vfclass.c} | 0 .../vfmacc-bf16.c => zvfbfa/non-policy/overloaded/vfmacc.c} | 0 .../vfmadd-bf16.c => zvfbfa/non-policy/overloaded/vfmadd.c} | 0 .../vfmax-bf16.c => zvfbfa/non-policy/overloaded/vfmax.c} | 0 .../vfmerge-bf16.c => zvfbfa/non-policy/overloaded/vfmerge.c} | 0 .../vfmin-bf16.c => 
zvfbfa/non-policy/overloaded/vfmin.c} | 0 .../vfmsac-bf16.c => zvfbfa/non-policy/overloaded/vfmsac.c} | 0 .../vfmsub-bf16.c => zvfbfa/non-policy/overloaded/vfmsub.c} | 0 .../vfmul-bf16.c => zvfbfa/non-policy/overloaded/vfmul.c} | 0 .../vfmv-bf16.c => zvfbfa/non-policy/overloaded/vfmv.c} | 0 .../vfncvt-bf16.c => zvfbfa/non-policy/overloaded/vfncvt.c} | 0 .../non-policy/overloaded/vfncvt_rod.c} | 0 .../non-policy/overloaded/vfncvt_rtz.c} | 0 .../vfnmacc-bf16.c => zvfbfa/non-policy/overloaded/vfnmacc.c} | 0 .../vfnmadd-bf16.c => zvfbfa/non-policy/overloaded/vfnmadd.c} | 0 .../vfnmsac-bf16.c => zvfbfa/non-policy/overloaded/vfnmsac.c} | 0 .../vfnmsub-bf16.c => zvfbfa/non-policy/overloaded/vfnmsub.c} | 0 .../vfrec7-bf16.c => zvfbfa/non-policy/overloaded/vfrec7.c} | 0 .../vfrsqrt7-bf16.c => zvfbfa/non-policy/overloaded/vfrsqrt7.c} | 0 .../vfrsub-bf16.c => zvfbfa/non-policy/overloaded/vfrsub.c} | 0 .../vfsgnj-bf16.c => zvfbfa/non-policy/overloaded/vfsgnj.c} | 0 .../vfsgnjn-bf16.c => zvfbfa/non-policy/overloaded/vfsgnjn.c} | 0 .../vfsgnjx-bf16.c => zvfbfa/non-policy/overloaded/vfsgnjx.c} | 0 .../non-policy/overloaded/vfslide1down.c} | 0 .../non-policy/overloaded/vfslide1up.c} | 0 .../vfsub-bf16.c => zvfbfa/non-policy/overloaded/vfsub.c} | 0 .../vfwadd-bf16.c => zvfbfa/non-policy/overloaded/vfwadd.c} | 0 .../vfwcvt-bf16.c => zvfbfa/non-policy/overloaded/vfwcvt.c} | 0 .../vfwmacc-bf16.c => zvfbfa/non-policy/overloaded/vfwmacc.c} | 0 .../vfwmsac-bf16.c => zvfbfa/non-policy/overloaded/vfwmsac.c} | 0 .../vfwmul-bf16.c => zvfbfa/non-policy/overloaded/vfwmul.c} | 0 .../vfwnmacc-bf16.c => zvfbfa/non-policy/overloaded/vfwnmacc.c} | 0 .../vfwnmsac-bf16.c => zvfbfa/non-policy/overloaded/vfwnmsac.c} | 0 .../vfwsub-bf16.c => zvfbfa/non-policy/overloaded/vfwsub.c} | 0 .../vmfeq-bf16.c => zvfbfa/non-policy/overloaded/vmfeq.c} | 0 .../vmfge-bf16.c => zvfbfa/non-policy/overloaded/vmfge.c} | 0 .../vmfgt-bf16.c => zvfbfa/non-policy/overloaded/vmfgt.c} | 0 .../vmfle-bf16.c => 
zvfbfa/non-policy/overloaded/vmfle.c} | 0 .../vmflt-bf16.c => zvfbfa/non-policy/overloaded/vmflt.c} | 0 .../vmfne-bf16.c => zvfbfa/non-policy/overloaded/vmfne.c} | 0 .../vfadd-bf16.c => zvfbfa/policy/non-overloaded/vfadd.c} | 0 .../vfclass-bf16.c => zvfbfa/policy/non-overloaded/vfclass.c} | 0 .../vfmacc-bf16.c => zvfbfa/policy/non-overloaded/vfmacc.c} | 0 .../vfmadd-bf16.c => zvfbfa/policy/non-overloaded/vfmadd.c} | 0 .../vfmax-bf16.c => zvfbfa/policy/non-overloaded/vfmax.c} | 0 .../vfmerge-bf16.c => zvfbfa/policy/non-overloaded/vfmerge.c} | 0 .../vfmin-bf16.c => zvfbfa/policy/non-overloaded/vfmin.c} | 0 .../vfmsac-bf16.c => zvfbfa/policy/non-overloaded/vfmsac.c} | 0 .../vfmsub-bf16.c => zvfbfa/policy/non-overloaded/vfmsub.c} | 0 .../vfmul-bf16.c => zvfbfa/policy/non-overloaded/vfmul.c} | 0 .../vfmv-bf16.c => zvfbfa/policy/non-overloaded/vfmv.c} | 0 .../vfncvt-bf16.c => zvfbfa/policy/non-overloaded/vfncvt.c} | 0 .../policy/non-overloaded/vfncvt_rod.c} | 0 .../policy/non-overloaded/vfncvt_rtz.c} | 0 .../vfnmacc-bf16.c => zvfbfa/policy/non-overloaded/vfnmacc.c} | 0 .../vfnmadd-bf16.c => zvfbfa/policy/non-overloaded/vfnmadd.c} | 0 .../vfnmsac-bf16.c => zvfbfa/policy/non-overloaded/vfnmsac.c} | 0 .../vfnmsub-bf16.c => zvfbfa/policy/non-overloaded/vfnmsub.c} | 0 .../vfrec7-bf16.c => zvfbfa/policy/non-overloaded/vfrec7.c} | 0 .../vfrsqrt7-bf16.c => zvfbfa/policy/non-overloaded/vfrsqrt7.c} | 0 .../vfrsub-bf16.c => zvfbfa/policy/non-overloaded/vfrsub.c} | 0 .../vfsgnj-bf16.c => zvfbfa/policy/non-overloaded/vfsgnj.c} | 0 .../vfsgnjn-bf16.c => zvfbfa/policy/non-overloaded/vfsgnjn.c} | 0 .../vfsgnjx-bf16.c => zvfbfa/policy/non-overloaded/vfsgnjx.c} | 0 .../policy/non-overloaded/vfslide1down.c} | 0 .../policy/non-overloaded/vfslide1up.c} | 0 .../vfsub-bf16.c => zvfbfa/policy/non-overloaded/vfsub.c} | 0 .../vfwadd-bf16.c => zvfbfa/policy/non-overloaded/vfwadd.c} | 0 .../vfwcvt-bf16.c => zvfbfa/policy/non-overloaded/vfwcvt.c} | 0 .../vfwmacc-bf16.c => 
zvfbfa/policy/non-overloaded/vfwmacc.c} | 0 .../vfwmsac-bf16.c => zvfbfa/policy/non-overloaded/vfwmsac.c} | 0 .../vfwmul-bf16.c => zvfbfa/policy/non-overloaded/vfwmul.c} | 0 .../vfwnmacc-bf16.c => zvfbfa/policy/non-overloaded/vfwnmacc.c} | 0 .../vfwnmsac-bf16.c => zvfbfa/policy/non-overloaded/vfwnmsac.c} | 0 .../vfwsub-bf16.c => zvfbfa/policy/non-overloaded/vfwsub.c} | 0 .../vmfeq-bf16.c => zvfbfa/policy/non-overloaded/vmfeq.c} | 0 .../vmfge-bf16.c => zvfbfa/policy/non-overloaded/vmfge.c} | 0 .../vmfgt-bf16.c => zvfbfa/policy/non-overloaded/vmfgt.c} | 0 .../vmfle-bf16.c => zvfbfa/policy/non-overloaded/vmfle.c} | 0 .../vmflt-bf16.c => zvfbfa/policy/non-overloaded/vmflt.c} | 0 .../vmfne-bf16.c => zvfbfa/policy/non-overloaded/vmfne.c} | 0 .../overloaded/vfadd-bf16.c => zvfbfa/policy/overloaded/vfadd.c} | 0 .../vfclass-bf16.c => zvfbfa/policy/overloaded/vfclass.c} | 0 .../vfmacc-bf16.c => zvfbfa/policy/overloaded/vfmacc.c} | 0 .../vfmadd-bf16.c => zvfbfa/policy/overloaded/vfmadd.c} | 0 .../overloaded/vfmax-bf16.c => zvfbfa/policy/overloaded/vfmax.c} | 0 .../vfmerge-bf16.c => zvfbfa/policy/overloaded/vfmerge.c} | 0 .../overloaded/vfmin-bf16.c => zvfbfa/policy/overloaded/vfmin.c} | 0 .../vfmsac-bf16.c => zvfbfa/policy/overloaded/vfmsac.c} | 0 .../vfmsub-bf16.c => zvfbfa/policy/overloaded/vfmsub.c} | 0 .../overloaded/vfmul-bf16.c => zvfbfa/policy/overloaded/vfmul.c} | 0 .../overloaded/vfmv-bf16.c => zvfbfa/policy/overloaded/vfmv.c} | 0 .../vfncvt-bf16.c => zvfbfa/policy/overloaded/vfncvt.c} | 0 .../vfncvt_rod-bf16.c => zvfbfa/policy/overloaded/vfncvt_rod.c} | 0 .../vfncvt_rtz-bf16.c => zvfbfa/policy/overloaded/vfncvt_rtz.c} | 0 .../vfnmacc-bf16.c => zvfbfa/policy/overloaded/vfnmacc.c} | 0 .../vfnmadd-bf16.c => zvfbfa/policy/overloaded/vfnmadd.c} | 0 .../vfnmsac-bf16.c => zvfbfa/policy/overloaded/vfnmsac.c} | 0 .../vfnmsub-bf16.c => zvfbfa/policy/overloaded/vfnmsub.c} | 0 .../vfrec7-bf16.c => zvfbfa/policy/overloaded/vfrec7.c} | 0 .../vfrsqrt7-bf16.c => 
zvfbfa/policy/overloaded/vfrsqrt7.c} | 0 .../vfrsub-bf16.c => zvfbfa/policy/overloaded/vfrsub.c} | 0 .../vfsgnj-bf16.c => zvfbfa/policy/overloaded/vfsgnj.c} | 0 .../vfsgnjn-bf16.c => zvfbfa/policy/overloaded/vfsgnjn.c} | 0 .../vfsgnjx-bf16.c => zvfbfa/policy/overloaded/vfsgnjx.c} | 0 .../policy/overloaded/vfslide1down.c} | 0 .../vfslide1up-bf16.c => zvfbfa/policy/overloaded/vfslide1up.c} | 0 .../overloaded/vfsub-bf16.c => zvfbfa/policy/overloaded/vfsub.c} | 0 .../vfwadd-bf16.c => zvfbfa/policy/overloaded/vfwadd.c} | 0 .../vfwcvt-bf16.c => zvfbfa/policy/overloaded/vfwcvt.c} | 0 .../vfwmacc-bf16.c => zvfbfa/policy/overloaded/vfwmacc.c} | 0 .../vfwmsac-bf16.c => zvfbfa/policy/overloaded/vfwmsac.c} | 0 .../vfwmul-bf16.c => zvfbfa/policy/overloaded/vfwmul.c} | 0 .../vfwnmacc-bf16.c => zvfbfa/policy/overloaded/vfwnmacc.c} | 0 .../vfwnmsac-bf16.c => zvfbfa/policy/overloaded/vfwnmsac.c} | 0 .../vfwsub-bf16.c => zvfbfa/policy/overloaded/vfwsub.c} | 0 .../overloaded/vmfeq-bf16.c => zvfbfa/policy/overloaded/vmfeq.c} | 0 .../overloaded/vmfge-bf16.c => zvfbfa/policy/overloaded/vmfge.c} | 0 .../overloaded/vmfgt-bf16.c => zvfbfa/policy/overloaded/vmfgt.c} | 0 .../overloaded/vmfle-bf16.c => zvfbfa/policy/overloaded/vmfle.c} | 0 .../overloaded/vmflt-bf16.c => zvfbfa/policy/overloaded/vmflt.c} | 0 .../overloaded/vmfne-bf16.c => zvfbfa/policy/overloaded/vmfne.c} | 0 164 files changed, 0 insertions(+), 0 deletions(-) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfadd-bf16.c => zvfbfa/non-policy/non-overloaded/vfadd.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfclass-bf16.c => zvfbfa/non-policy/non-overloaded/vfclass.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfmacc-bf16.c => zvfbfa/non-policy/non-overloaded/vfmacc.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfmadd-bf16.c => 
zvfbfa/non-policy/non-overloaded/vfmadd.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfmax-bf16.c => zvfbfa/non-policy/non-overloaded/vfmax.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfmerge-bf16.c => zvfbfa/non-policy/non-overloaded/vfmerge.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfmin-bf16.c => zvfbfa/non-policy/non-overloaded/vfmin.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfmsac-bf16.c => zvfbfa/non-policy/non-overloaded/vfmsac.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfmsub-bf16.c => zvfbfa/non-policy/non-overloaded/vfmsub.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfmul-bf16.c => zvfbfa/non-policy/non-overloaded/vfmul.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfmv-bf16.c => zvfbfa/non-policy/non-overloaded/vfmv.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfncvt-bf16.c => zvfbfa/non-policy/non-overloaded/vfncvt.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfncvt_rod-bf16.c => zvfbfa/non-policy/non-overloaded/vfncvt_rod.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfncvt_rtz-bf16.c => zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfnmacc-bf16.c => zvfbfa/non-policy/non-overloaded/vfnmacc.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfnmadd-bf16.c => zvfbfa/non-policy/non-overloaded/vfnmadd.c} (100%) rename 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfnmsac-bf16.c => zvfbfa/non-policy/non-overloaded/vfnmsac.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfnmsub-bf16.c => zvfbfa/non-policy/non-overloaded/vfnmsub.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfrec7-bf16.c => zvfbfa/non-policy/non-overloaded/vfrec7.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfrsqrt7-bf16.c => zvfbfa/non-policy/non-overloaded/vfrsqrt7.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfrsub-bf16.c => zvfbfa/non-policy/non-overloaded/vfrsub.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfsgnj-bf16.c => zvfbfa/non-policy/non-overloaded/vfsgnj.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfsgnjn-bf16.c => zvfbfa/non-policy/non-overloaded/vfsgnjn.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfsgnjx-bf16.c => zvfbfa/non-policy/non-overloaded/vfsgnjx.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfslide1down-bf16.c => zvfbfa/non-policy/non-overloaded/vfslide1down.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfslide1up-bf16.c => zvfbfa/non-policy/non-overloaded/vfslide1up.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfsub-bf16.c => zvfbfa/non-policy/non-overloaded/vfsub.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfwadd-bf16.c => zvfbfa/non-policy/non-overloaded/vfwadd.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfwcvt-bf16.c => 
zvfbfa/non-policy/non-overloaded/vfwcvt.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfwmacc-bf16.c => zvfbfa/non-policy/non-overloaded/vfwmacc.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfwmsac-bf16.c => zvfbfa/non-policy/non-overloaded/vfwmsac.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfwmul-bf16.c => zvfbfa/non-policy/non-overloaded/vfwmul.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfwnmacc-bf16.c => zvfbfa/non-policy/non-overloaded/vfwnmacc.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfwnmsac-bf16.c => zvfbfa/non-policy/non-overloaded/vfwnmsac.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vfwsub-bf16.c => zvfbfa/non-policy/non-overloaded/vfwsub.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vmfeq-bf16.c => zvfbfa/non-policy/non-overloaded/vmfeq.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vmfge-bf16.c => zvfbfa/non-policy/non-overloaded/vmfge.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vmfgt-bf16.c => zvfbfa/non-policy/non-overloaded/vmfgt.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vmfle-bf16.c => zvfbfa/non-policy/non-overloaded/vmfle.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vmflt-bf16.c => zvfbfa/non-policy/non-overloaded/vmflt.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/non-overloaded/vmfne-bf16.c => zvfbfa/non-policy/non-overloaded/vmfne.c} (100%) rename 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfadd-bf16.c => zvfbfa/non-policy/overloaded/vfadd.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfclass-bf16.c => zvfbfa/non-policy/overloaded/vfclass.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfmacc-bf16.c => zvfbfa/non-policy/overloaded/vfmacc.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfmadd-bf16.c => zvfbfa/non-policy/overloaded/vfmadd.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfmax-bf16.c => zvfbfa/non-policy/overloaded/vfmax.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfmerge-bf16.c => zvfbfa/non-policy/overloaded/vfmerge.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfmin-bf16.c => zvfbfa/non-policy/overloaded/vfmin.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfmsac-bf16.c => zvfbfa/non-policy/overloaded/vfmsac.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfmsub-bf16.c => zvfbfa/non-policy/overloaded/vfmsub.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfmul-bf16.c => zvfbfa/non-policy/overloaded/vfmul.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfmv-bf16.c => zvfbfa/non-policy/overloaded/vfmv.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfncvt-bf16.c => zvfbfa/non-policy/overloaded/vfncvt.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfncvt_rod-bf16.c => zvfbfa/non-policy/overloaded/vfncvt_rod.c} (100%) rename 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfncvt_rtz-bf16.c => zvfbfa/non-policy/overloaded/vfncvt_rtz.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfnmacc-bf16.c => zvfbfa/non-policy/overloaded/vfnmacc.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfnmadd-bf16.c => zvfbfa/non-policy/overloaded/vfnmadd.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfnmsac-bf16.c => zvfbfa/non-policy/overloaded/vfnmsac.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfnmsub-bf16.c => zvfbfa/non-policy/overloaded/vfnmsub.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfrec7-bf16.c => zvfbfa/non-policy/overloaded/vfrec7.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfrsqrt7-bf16.c => zvfbfa/non-policy/overloaded/vfrsqrt7.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfrsub-bf16.c => zvfbfa/non-policy/overloaded/vfrsub.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfsgnj-bf16.c => zvfbfa/non-policy/overloaded/vfsgnj.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfsgnjn-bf16.c => zvfbfa/non-policy/overloaded/vfsgnjn.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfsgnjx-bf16.c => zvfbfa/non-policy/overloaded/vfsgnjx.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfslide1down-bf16.c => zvfbfa/non-policy/overloaded/vfslide1down.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfslide1up-bf16.c => zvfbfa/non-policy/overloaded/vfslide1up.c} (100%) rename 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfsub-bf16.c => zvfbfa/non-policy/overloaded/vfsub.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfwadd-bf16.c => zvfbfa/non-policy/overloaded/vfwadd.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfwcvt-bf16.c => zvfbfa/non-policy/overloaded/vfwcvt.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfwmacc-bf16.c => zvfbfa/non-policy/overloaded/vfwmacc.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfwmsac-bf16.c => zvfbfa/non-policy/overloaded/vfwmsac.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfwmul-bf16.c => zvfbfa/non-policy/overloaded/vfwmul.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfwnmacc-bf16.c => zvfbfa/non-policy/overloaded/vfwnmacc.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfwnmsac-bf16.c => zvfbfa/non-policy/overloaded/vfwnmsac.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vfwsub-bf16.c => zvfbfa/non-policy/overloaded/vfwsub.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vmfeq-bf16.c => zvfbfa/non-policy/overloaded/vmfeq.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vmfge-bf16.c => zvfbfa/non-policy/overloaded/vmfge.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vmfgt-bf16.c => zvfbfa/non-policy/overloaded/vmfgt.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vmfle-bf16.c => zvfbfa/non-policy/overloaded/vmfle.c} (100%) rename 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vmflt-bf16.c => zvfbfa/non-policy/overloaded/vmflt.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{non-policy/overloaded/vmfne-bf16.c => zvfbfa/non-policy/overloaded/vmfne.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfadd-bf16.c => zvfbfa/policy/non-overloaded/vfadd.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfclass-bf16.c => zvfbfa/policy/non-overloaded/vfclass.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfmacc-bf16.c => zvfbfa/policy/non-overloaded/vfmacc.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfmadd-bf16.c => zvfbfa/policy/non-overloaded/vfmadd.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfmax-bf16.c => zvfbfa/policy/non-overloaded/vfmax.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfmerge-bf16.c => zvfbfa/policy/non-overloaded/vfmerge.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfmin-bf16.c => zvfbfa/policy/non-overloaded/vfmin.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfmsac-bf16.c => zvfbfa/policy/non-overloaded/vfmsac.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfmsub-bf16.c => zvfbfa/policy/non-overloaded/vfmsub.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfmul-bf16.c => zvfbfa/policy/non-overloaded/vfmul.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfmv-bf16.c => zvfbfa/policy/non-overloaded/vfmv.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfncvt-bf16.c => 
zvfbfa/policy/non-overloaded/vfncvt.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfncvt_rod-bf16.c => zvfbfa/policy/non-overloaded/vfncvt_rod.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfncvt_rtz-bf16.c => zvfbfa/policy/non-overloaded/vfncvt_rtz.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfnmacc-bf16.c => zvfbfa/policy/non-overloaded/vfnmacc.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfnmadd-bf16.c => zvfbfa/policy/non-overloaded/vfnmadd.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfnmsac-bf16.c => zvfbfa/policy/non-overloaded/vfnmsac.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfnmsub-bf16.c => zvfbfa/policy/non-overloaded/vfnmsub.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfrec7-bf16.c => zvfbfa/policy/non-overloaded/vfrec7.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfrsqrt7-bf16.c => zvfbfa/policy/non-overloaded/vfrsqrt7.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfrsub-bf16.c => zvfbfa/policy/non-overloaded/vfrsub.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfsgnj-bf16.c => zvfbfa/policy/non-overloaded/vfsgnj.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfsgnjn-bf16.c => zvfbfa/policy/non-overloaded/vfsgnjn.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfsgnjx-bf16.c => zvfbfa/policy/non-overloaded/vfsgnjx.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfslide1down-bf16.c => zvfbfa/policy/non-overloaded/vfslide1down.c} 
(100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfslide1up-bf16.c => zvfbfa/policy/non-overloaded/vfslide1up.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfsub-bf16.c => zvfbfa/policy/non-overloaded/vfsub.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfwadd-bf16.c => zvfbfa/policy/non-overloaded/vfwadd.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfwcvt-bf16.c => zvfbfa/policy/non-overloaded/vfwcvt.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfwmacc-bf16.c => zvfbfa/policy/non-overloaded/vfwmacc.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfwmsac-bf16.c => zvfbfa/policy/non-overloaded/vfwmsac.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfwmul-bf16.c => zvfbfa/policy/non-overloaded/vfwmul.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfwnmacc-bf16.c => zvfbfa/policy/non-overloaded/vfwnmacc.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfwnmsac-bf16.c => zvfbfa/policy/non-overloaded/vfwnmsac.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vfwsub-bf16.c => zvfbfa/policy/non-overloaded/vfwsub.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vmfeq-bf16.c => zvfbfa/policy/non-overloaded/vmfeq.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vmfge-bf16.c => zvfbfa/policy/non-overloaded/vmfge.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vmfgt-bf16.c => zvfbfa/policy/non-overloaded/vmfgt.c} (100%) rename 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vmfle-bf16.c => zvfbfa/policy/non-overloaded/vmfle.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vmflt-bf16.c => zvfbfa/policy/non-overloaded/vmflt.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/non-overloaded/vmfne-bf16.c => zvfbfa/policy/non-overloaded/vmfne.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfadd-bf16.c => zvfbfa/policy/overloaded/vfadd.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfclass-bf16.c => zvfbfa/policy/overloaded/vfclass.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfmacc-bf16.c => zvfbfa/policy/overloaded/vfmacc.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfmadd-bf16.c => zvfbfa/policy/overloaded/vfmadd.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfmax-bf16.c => zvfbfa/policy/overloaded/vfmax.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfmerge-bf16.c => zvfbfa/policy/overloaded/vfmerge.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfmin-bf16.c => zvfbfa/policy/overloaded/vfmin.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfmsac-bf16.c => zvfbfa/policy/overloaded/vfmsac.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfmsub-bf16.c => zvfbfa/policy/overloaded/vfmsub.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfmul-bf16.c => zvfbfa/policy/overloaded/vfmul.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfmv-bf16.c => zvfbfa/policy/overloaded/vfmv.c} (100%) rename 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfncvt-bf16.c => zvfbfa/policy/overloaded/vfncvt.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfncvt_rod-bf16.c => zvfbfa/policy/overloaded/vfncvt_rod.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfncvt_rtz-bf16.c => zvfbfa/policy/overloaded/vfncvt_rtz.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfnmacc-bf16.c => zvfbfa/policy/overloaded/vfnmacc.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfnmadd-bf16.c => zvfbfa/policy/overloaded/vfnmadd.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfnmsac-bf16.c => zvfbfa/policy/overloaded/vfnmsac.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfnmsub-bf16.c => zvfbfa/policy/overloaded/vfnmsub.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfrec7-bf16.c => zvfbfa/policy/overloaded/vfrec7.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfrsqrt7-bf16.c => zvfbfa/policy/overloaded/vfrsqrt7.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfrsub-bf16.c => zvfbfa/policy/overloaded/vfrsub.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfsgnj-bf16.c => zvfbfa/policy/overloaded/vfsgnj.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfsgnjn-bf16.c => zvfbfa/policy/overloaded/vfsgnjn.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfsgnjx-bf16.c => zvfbfa/policy/overloaded/vfsgnjx.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfslide1down-bf16.c => zvfbfa/policy/overloaded/vfslide1down.c} (100%) rename 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfslide1up-bf16.c => zvfbfa/policy/overloaded/vfslide1up.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfsub-bf16.c => zvfbfa/policy/overloaded/vfsub.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfwadd-bf16.c => zvfbfa/policy/overloaded/vfwadd.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfwcvt-bf16.c => zvfbfa/policy/overloaded/vfwcvt.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfwmacc-bf16.c => zvfbfa/policy/overloaded/vfwmacc.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfwmsac-bf16.c => zvfbfa/policy/overloaded/vfwmsac.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfwmul-bf16.c => zvfbfa/policy/overloaded/vfwmul.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfwnmacc-bf16.c => zvfbfa/policy/overloaded/vfwnmacc.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfwnmsac-bf16.c => zvfbfa/policy/overloaded/vfwnmsac.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vfwsub-bf16.c => zvfbfa/policy/overloaded/vfwsub.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vmfeq-bf16.c => zvfbfa/policy/overloaded/vmfeq.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vmfge-bf16.c => zvfbfa/policy/overloaded/vmfge.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vmfgt-bf16.c => zvfbfa/policy/overloaded/vmfgt.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vmfle-bf16.c => zvfbfa/policy/overloaded/vmfle.c} (100%) rename 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vmflt-bf16.c => zvfbfa/policy/overloaded/vmflt.c} (100%) rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/{policy/overloaded/vmfne-bf16.c => zvfbfa/policy/overloaded/vmfne.c} (100%) diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfadd.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfadd.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfclass-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfclass.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfclass-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfclass.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmacc.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmacc.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmadd.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd-bf16.c rename to 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmadd.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmax-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmax.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmax-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmax.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmerge-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmerge.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmerge-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmerge.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmin-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmin.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmin-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmin.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsac.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsac.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub-bf16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsub.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsub.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmul-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmul.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmul-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmul.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmv-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmv.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmv-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmv.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt_rod-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rod.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt_rod-bf16.c rename to 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rod.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt_rtz-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt_rtz-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmacc.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmacc.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmadd.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmadd-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmadd.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsac.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsac-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsac.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsub-bf16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsub.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsub-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsub.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrec7-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrec7.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrec7-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrec7.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsqrt7-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsqrt7.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsqrt7-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsqrt7.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsub.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsub-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsub.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnj-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnj.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnj-bf16.c rename 
to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnj.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjn-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjn.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjn-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjn.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjx-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjx.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjx-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjx.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1down-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1down.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1down-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1down.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1up-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1up.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1up-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1up.c diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsub.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsub-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsub.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwadd.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwadd-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwadd.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwcvt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwcvt.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwcvt-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwcvt.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmacc.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmacc-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmacc.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmsac.c similarity index 100% rename from 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmsac-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmsac.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmul-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmul.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmul-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmul.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmacc.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmacc-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmacc.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmsac.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmsac-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmsac.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwsub.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwsub-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwsub.c diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfeq-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfeq.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfeq-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfeq.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfge-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfge.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfge-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfge.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfgt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfgt.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfgt-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfgt.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfle-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfle.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfle-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfle.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmflt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmflt.c similarity index 100% rename from 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmflt-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmflt.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfne-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfne.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfne-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfne.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfadd.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfadd.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfclass-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfclass.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfclass-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfclass.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmacc.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmacc-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmacc.c diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmadd.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmadd-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmadd.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmax-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmax.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmax-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmax.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmerge-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmerge.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmerge-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmerge.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmin-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmin.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmin-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmin.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsac.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsac-bf16.c 
rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsac.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsub.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsub-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsub.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmul-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmul.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmul-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmul.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmv-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmv.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmv-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmv.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt_rod-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rod.c similarity 
index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt_rod-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rod.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt_rtz-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rtz.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt_rtz-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rtz.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmacc.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmacc-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmacc.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmadd.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmadd-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmadd.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsac.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsac-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsac.c diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsub.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsub-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsub.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrec7-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrec7.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrec7-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrec7.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsqrt7-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsqrt7.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsqrt7-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsqrt7.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsub.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfrsub-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsub.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnj-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnj.c similarity index 100% rename from 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnj-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnj.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjn-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjn.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjn-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjn.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjx-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjx.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsgnjx-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjx.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1down-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1down.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1down-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1down.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1up-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1up.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfslide1up-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1up.c diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsub.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfsub-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsub.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwadd.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwadd-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwadd.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwcvt.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwcvt.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmacc.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmacc-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmacc.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmsac.c similarity index 100% rename from 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmsac-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmsac.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmul-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmul.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwmul-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmul.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmacc.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmacc-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmacc.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmsac.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwnmsac-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmsac.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwsub.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwsub-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwsub.c diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfeq-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfeq.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfeq-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfeq.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfge-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfge.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfge-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfge.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfgt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfgt.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfgt-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfgt.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfle-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfle.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfle-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfle.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmflt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmflt.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmflt-bf16.c rename to 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmflt.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfne-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfne.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmfne-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfne.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfadd.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfadd.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfclass-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfclass.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfclass-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfclass.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmacc.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmacc-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmacc.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmadd.c similarity index 100% 
rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmadd-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmadd.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmax-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmax.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmax-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmax.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmerge-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmerge.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmerge-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmerge.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmin-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmin.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmin-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmin.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsac.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsac-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsac.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsub-bf16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsub.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsub-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsub.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmul-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmul.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmul-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmul.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmv-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmv.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmv-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmv.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt_rod-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rod.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt_rod-bf16.c rename to 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rod.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt_rtz-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rtz.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt_rtz-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rtz.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmacc.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmacc-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmacc.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmadd.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmadd-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmadd.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsac.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsac-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsac.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsub-bf16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsub.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsub-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsub.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrec7-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrec7.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrec7-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrec7.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrsqrt7-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsqrt7.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrsqrt7-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsqrt7.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsub.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrsub-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsub.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnj-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnj.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnj-bf16.c rename to 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnj.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnjn-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjn.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnjn-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjn.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnjx-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjx.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnjx-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjx.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfslide1down-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1down.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfslide1down-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1down.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfslide1up-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1up.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfslide1up-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1up.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsub-bf16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsub.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsub-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsub.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwadd.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwadd-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwadd.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwcvt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwcvt.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwcvt-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwcvt.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmacc.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmacc-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmacc.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmsac.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmsac-bf16.c rename to 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmsac.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmul-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmul.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmul-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmul.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmacc.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmacc-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmacc.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmsac.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmsac-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmsac.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwsub.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwsub-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwsub.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfeq-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfeq.c 
similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfeq-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfeq.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfge-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfge.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfge-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfge.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfgt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfgt.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfgt-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfgt.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfle-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfle.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfle-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfle.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmflt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmflt.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmflt-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmflt.c diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfne-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfne.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfne-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfne.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfadd.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfadd.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfclass-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfclass.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfclass-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfclass.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmacc.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmacc-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmacc.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmadd.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmadd-bf16.c rename to 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmadd.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmax-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmax.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmax-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmax.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmerge-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmerge.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmerge-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmerge.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmin-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmin.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmin-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmin.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsac.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsac-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsac.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsub.c similarity index 100% rename from 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmsub-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsub.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmul-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmul.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmul-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmul.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmv-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmv.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmv-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmv.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt_rod-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rod.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt_rod-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rod.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt_rtz-bf16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rtz.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt_rtz-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rtz.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmacc.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmacc-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmacc.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmadd.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmadd-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmadd.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsac.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsac-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsac.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsub.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfnmsub-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsub.c diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrec7-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrec7.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrec7-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrec7.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsqrt7-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsqrt7.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsqrt7-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsqrt7.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsub.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfrsub-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsub.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnj-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnj.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnj-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnj.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjn-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjn.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjn-bf16.c rename to 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjn.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjx-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjx.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsgnjx-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjx.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1down-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1down.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1down-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1down.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1up-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1up.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfslide1up-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1up.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsub.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfsub-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsub.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwadd-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwadd.c similarity index 100% rename from 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwadd-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwadd.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwcvt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwcvt.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwcvt-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwcvt.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmacc-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmacc.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmacc-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmacc.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmsac.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmsac-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmsac.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmul-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmul.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwmul-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmul.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmacc-bf16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmacc.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmacc-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmacc.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmsac-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmsac.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwnmsac-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmsac.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwsub-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwsub.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwsub-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwsub.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfeq-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfeq.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfeq-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfeq.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfge-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfge.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfge-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfge.c diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfgt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfgt.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfgt-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfgt.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfle-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfle.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfle-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfle.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmflt-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmflt.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmflt-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmflt.c diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfne-bf16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfne.c similarity index 100% rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmfne-bf16.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfne.c From d8ee8c21e67a4aa3185b1b529191a89bd6da8d92 Mon Sep 17 00:00:00 2001 From: Brandon Wu Date: Mon, 13 Oct 2025 22:34:39 +0800 Subject: [PATCH 08/11] fixup! 
[RISCV][clang] Support Zvfbfa C intrinsics --- clang/include/clang/Basic/riscv_vector_common.td | 2 ++ 1 file changed, 2 insertions(+) diff --git a/clang/include/clang/Basic/riscv_vector_common.td b/clang/include/clang/Basic/riscv_vector_common.td index 2a2a04998366a..eaa2ba43885b0 100644 --- a/clang/include/clang/Basic/riscv_vector_common.td +++ b/clang/include/clang/Basic/riscv_vector_common.td @@ -83,6 +83,8 @@ // elements of the same width // F: given a vector type, compute the vector type with floating-point type // elements of the same width +// Y: given a vector type, compute the vector type with bfloat16 type elements +// of the same width // S: given a vector type, computes its equivalent one for LMUL=1. This is a // no-op if the vector was already LMUL=1 // (Log2EEW:Value): Log2EEW value could be 3/4/5/6 (8/16/32/64), given a From 7731e98ee3e6df697fcba9158e3e984f198e3152 Mon Sep 17 00:00:00 2001 From: Brandon Wu Date: Tue, 14 Oct 2025 08:49:16 -0700 Subject: [PATCH 09/11] fixup! clean up and update test --- llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td | 3 --- llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll | 4 ++-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td index c1b23afb2adb6..65865ce461624 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -128,9 +128,6 @@ defvar TAIL_AGNOSTIC = 1; defvar TU_MU = 0; defvar TA_MU = 1; defvar TA_MA = 3; -defvar DONT_CARE_ALTFMT = 0; -defvar IS_NOT_ALTFMT = 1; -defvar IS_ALTFMT = 2; //===----------------------------------------------------------------------===// // Utilities. 
diff --git a/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll b/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll index 145fc5dee4b4c..489323b323110 100644 --- a/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll @@ -129,7 +129,7 @@ define @test_bf16_half_bf16( %0, @test_bf16_i16( %0, Date: Tue, 14 Oct 2025 16:43:28 -0700 Subject: [PATCH 10/11] fixup! Update findCommutedOpIndices and commuteInstructionImpl. --- llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 4 ++ llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll | 47 +++++++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll | 47 +++++++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll | 47 +++++++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll | 47 +++++++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll | 47 +++++++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll | 47 +++++++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll | 47 +++++++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll | 47 +++++++++++++++++++++++ 9 files changed, 380 insertions(+) diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp index ddb53a2ce62b3..e581900cdc775 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp @@ -3775,11 +3775,13 @@ std::string RISCVInstrInfo::createMIROperandComment( #define CASE_VFMA_OPCODE_VV(OP) \ CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16): \ + case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VV, E16): \ case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32): \ case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64) #define CASE_VFMA_SPLATS(OP) \ CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16): \ + case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VFPR16, E16): \ case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32): \ case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64) // clang-format on @@ -4003,11 +4005,13 @@ bool RISCVInstrInfo::findCommutedOpIndices(const 
MachineInstr &MI, #define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \ + CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VV, E16) \ CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \ CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64) #define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \ CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \ + CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VFPR16, E16) \ CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \ CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64) // clang-format on diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll index 0937a82b48580..13821d745846f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll @@ -504,3 +504,50 @@ entry: ret %a } +define @intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmadd.vv v8, v10, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16( + %1, + %0, + %2, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} + +define @intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmadd.vv v8, v10, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16( + %1, + %2, + %0, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} + +define @intrinsic_vfmacc_vf_nxv1bf16_bf16_nxv1bf16_commute( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1bf16_bf16_nxv1bf16_commute: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv1bf16.bf16( + %2, + bfloat %1, + %0, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll index 795fd4f56ec2c..09fc199c29d23 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll @@ -504,3 +504,50 @@ entry: ret %a } +define @intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmadd.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16( + %1, + %0, + %2, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} + +define @intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmacc.vv v8, v10, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16( + %1, + %2, + %0, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} + +define @intrinsic_vfmadd_vf_nxv1bf16_bf16_nxv1bf16_commute( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1bf16_bf16_nxv1bf16_commute: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmacc.vf v8, fa0, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmadd.nxv1bf16.bf16( + %2, + bfloat %1, + %0, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll index ebeda9eaf42c2..948d2196f2bb4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll @@ -504,3 +504,50 @@ entry: ret %a } +define @intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmsub.vv v8, v10, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16( + %1, + %0, + %2, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} + +define @intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmsub.vv v8, v10, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16( + %1, + %2, + %0, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} + +define @intrinsic_vfmsac_vf_nxv1bf16_bf16_nxv1bf16_commute( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1bf16_bf16_nxv1bf16_commute: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv1bf16.bf16( + %2, + bfloat %1, + %0, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll index b032c1d66f3b9..6838f37339e98 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll @@ -504,3 +504,50 @@ entry: ret %a } +define @intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmsub.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfmsub.nxv1bf16.nxv1bf16( + %1, + %0, + %2, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} + +define @intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmsac.vv v8, v10, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16( + %1, + %2, + %0, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} + +define @intrinsic_vfmsub_vf_nxv1bf16_bf16_nxv1bf16_commute( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1bf16_bf16_nxv1bf16_commute: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmsac.vf v8, fa0, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv1bf16.bf16( + %2, + bfloat %1, + %0, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll index 4bf643003a38c..4b4091ba7acbe 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll @@ -504,3 +504,50 @@ entry: ret %a } +define @intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfnmadd.vv v8, v10, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16( + %1, + %0, + %2, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} + +define @intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfnmadd.vv v8, v10, v9 +; CHECK-NEXT: ret +entry: + %a = 
call @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16( + %1, + %2, + %0, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} + +define @intrinsic_vfnmacc_vf_nxv1bf16_bf16_nxv1bf16_commute( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1bf16_bf16_nxv1bf16_commute: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv1bf16.bf16( + %2, + bfloat %1, + %0, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll index 7dcaa1c24e6de..2bb6bf5ae9e26 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll @@ -504,3 +504,50 @@ entry: ret %a } +define @intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16( + %1, + %0, + %2, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} + +define @intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfnmacc.vv v8, v10, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16( + %1, + %2, + %0, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} + +define @intrinsic_vfnmadd_vf_nxv1bf16_bf16_nxv1bf16_commute( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1bf16_bf16_nxv1bf16_commute: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 +; CHECK-NEXT: ret +entry: 
+ %a = call @llvm.riscv.vfnmadd.nxv1bf16.bf16( + %2, + bfloat %1, + %0, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll index 9528f80453398..cfbaafa00c043 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll @@ -504,3 +504,50 @@ entry: ret %a } +define @intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfnmsub.vv v8, v10, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16( + %1, + %0, + %2, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} + +define @intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfnmsub.vv v8, v10, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16( + %1, + %2, + %0, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} + +define @intrinsic_vfnmsac_vf_nxv1bf16_bf16_nxv1bf16_commute( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1bf16_bf16_nxv1bf16_commute: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv1bf16.bf16( + %2, + bfloat %1, + %0, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll index dcbb9ced92db7..5ebbb90c4c5a2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll @@ -504,3 +504,50 @@ entry: ret %a } +define 
@intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfnmsub.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16( + %1, + %0, + %2, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} + +define @intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16_commute2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfnmsac.vv v8, v10, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16( + %1, + %2, + %0, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} + +define @intrinsic_vfnmsub_vf_nxv1bf16_bf16_nxv1bf16_commute( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1bf16_bf16_nxv1bf16_commute: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.nxv1bf16.bf16( + %2, + bfloat %1, + %0, + iXLen 7, iXLen %3, iXLen 3) + + ret %a +} From 4cc280b6b981363838fe2e070389c7e1f47e37c5 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Tue, 14 Oct 2025 16:43:39 -0700 Subject: [PATCH 11/11] fixup! Update convertToThreeAddress. 
--- llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 18 ++++++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll | 24 +++++++++++----------- llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll | 24 +++++++++++----------- 3 files changed, 42 insertions(+), 24 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp index e581900cdc775..12f776bbd4fa4 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp @@ -4473,6 +4473,20 @@ bool RISCVInstrInfo::simplifyInstruction(MachineInstr &MI) const { CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \ CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \ CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32) \ + +#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP) \ + CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \ + case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \ + case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \ + case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \ + case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16) + +#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP) \ + CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \ + CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \ + CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \ + CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \ + CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) // clang-format on MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI, @@ -4482,6 +4496,8 @@ MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI, switch (MI.getOpcode()) { default: return nullptr; + case CASE_FP_WIDEOP_OPCODE_LMULS_ALT(FWADD_ALT_WV): + case CASE_FP_WIDEOP_OPCODE_LMULS_ALT(FWSUB_ALT_WV): case CASE_FP_WIDEOP_OPCODE_LMULS(FWADD_WV): case CASE_FP_WIDEOP_OPCODE_LMULS(FWSUB_WV): { assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) && @@ -4498,6 +4514,8 @@ MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI, llvm_unreachable("Unexpected opcode"); 
CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(FWADD_WV) CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(FWSUB_WV) + CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(FWADD_ALT_WV) + CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(FWSUB_ALT_WV) } // clang-format on diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll index 69c9a4ea75c97..c5417e826bf41 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll @@ -700,9 +700,9 @@ define @intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1bf1 ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fsrmi a1, 0 ; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma -; CHECK-NEXT: vfwadd.wv v9, v9, v8 +; CHECK-NEXT: vfwadd.wv v10, v9, v8 ; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16( @@ -719,9 +719,9 @@ define @intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2bf1 ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fsrmi a1, 0 ; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma -; CHECK-NEXT: vfwadd.wv v9, v9, v8 +; CHECK-NEXT: vfwadd.wv v10, v9, v8 ; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16( @@ -736,11 +736,11 @@ entry: define @intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4bf16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fsrmi a1, 0 ; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma -; CHECK-NEXT: vfwadd.wv v10, v10, v8 -; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwadd.wv v8, v10, v12 +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16( @@ -755,11 +755,11 @@ entry: define @intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8bf16( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8bf16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fsrmi a1, 0 ; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma -; CHECK-NEXT: vfwadd.wv v12, v12, v8 -; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwadd.wv v8, v12, v16 +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll index 11066d9487684..b22899a100e4a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll @@ -700,9 +700,9 @@ define @intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1bf1 ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fsrmi a1, 0 ; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma -; CHECK-NEXT: vfwsub.wv v9, v9, v8 +; CHECK-NEXT: vfwsub.wv v10, v9, v8 ; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16( @@ -719,9 +719,9 @@ define @intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2bf1 ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: fsrmi a1, 0 ; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma -; CHECK-NEXT: vfwsub.wv v9, v9, v8 +; CHECK-NEXT: vfwsub.wv v10, v9, v8 ; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16( @@ -736,11 +736,11 @@ entry: define @intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4bf16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fsrmi a1, 0 ; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma -; CHECK-NEXT: vfwsub.wv v10, v10, v8 -; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwsub.wv 
v8, v10, v12 +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16( @@ -755,11 +755,11 @@ entry: define @intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8bf16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8bf16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fsrmi a1, 0 ; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma -; CHECK-NEXT: vfwsub.wv v12, v12, v8 -; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwsub.wv v8, v12, v16 +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16(