diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 09f518cc54772..f23d9edc9d96a 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -226,6 +226,11 @@ multiclass RVVFloatingBinBuiltinSet
                           [["vv", "v", "vvv"],
                            ["vf", "v", "vve"]]>;
 
+multiclass RVVFloatingBinBuiltinSetRoundingMode
+    : RVVOutOp1BuiltinSet<NAME, "xfd",
+                          [["vv", "v", "vvvu"],
+                           ["vf", "v", "vveu"]]>;
+
 multiclass RVVFloatingBinVFBuiltinSet
     : RVVOutOp1BuiltinSet<NAME, "xfd",
                           [["vf", "v", "vve"]]>;
@@ -1856,10 +1861,72 @@ let ManualCodegen = [{
   defm vnclipu : RVVUnsignedNShiftBuiltinSetRoundingMode;
   defm vnclip : RVVSignedNShiftBuiltinSetRoundingMode;
 }
+}
 
 // 14. Vector Floating-Point Instructions
+let HeaderCode =
+[{
+enum __RISCV_FRM {
+  __RISCV_FRM_RNE = 0,
+  __RISCV_FRM_RTZ = 1,
+  __RISCV_FRM_RDN = 2,
+  __RISCV_FRM_RUP = 3,
+  __RISCV_FRM_RMM = 4,
+};
+}] in def frm_enum : RVVHeader;
+
+let UnMaskedPolicyScheme = HasPassthruOperand in {
 
 // 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
-defm vfadd : RVVFloatingBinBuiltinSet;
+let ManualCodegen = [{
+  {
+    // LLVM intrinsic
+    // Unmasked: (passthru, op0, op1, round_mode, vl)
+    // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
+
+    SmallVector<llvm::Value*, 7> Operands;
+    bool HasMaskedOff = !(
+        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+        (!IsMasked && PolicyAttrs & RVV_VTA));
+    bool HasRoundModeOp = IsMasked ?
+        (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
+        (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
+
+    unsigned Offset = IsMasked ?
+        (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+    if (!HasMaskedOff)
+      Operands.push_back(llvm::PoisonValue::get(ResultType));
+    else
+      Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+    Operands.push_back(Ops[Offset]); // op0
+    Operands.push_back(Ops[Offset + 1]); // op1
+
+    if (IsMasked)
+      Operands.push_back(Ops[0]); // mask
+
+    if (HasRoundModeOp) {
+      Operands.push_back(Ops[Offset + 2]); // frm
+      Operands.push_back(Ops[Offset + 3]); // vl
+    } else {
+      Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
+      Operands.push_back(Ops[Offset + 2]); // vl
+    }
+
+    if (IsMasked)
+      Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+    IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(),
+                      Operands.back()->getType()};
+    llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+    return Builder.CreateCall(F, Operands, "");
+  }
+}] in {
+  let HasFRMRoundModeOp = true in {
+    defm vfadd : RVVFloatingBinBuiltinSetRoundingMode;
+  }
+  defm vfadd : RVVFloatingBinBuiltinSet;
+}
 defm vfsub : RVVFloatingBinBuiltinSet;
 defm vfrsub : RVVFloatingBinVFBuiltinSet;
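For context, this is what the table-gen change above produces at the source level: the plain intrinsic keeps its old signature, while the new `_rm` variant takes an explicit rounding-mode argument from the `__RISCV_FRM` enum. A minimal sketch (the wrapper functions are illustrative only; the intrinsic names and signatures are the ones exercised by the tests further down):

```c
#include <riscv_vector.h>

// Plain vfadd: no frm argument; the codegen above fills in rounding-mode
// operand 7 (DYN), i.e. "use whatever the frm CSR currently holds".
vfloat32m1_t add_dyn(vfloat32m1_t a, vfloat32m1_t b, size_t vl) {
  return __riscv_vfadd_vv_f32m1(a, b, vl);
}

// _rm variant: the rounding mode is an extra argument and must be a constant
// in [0, 4], as enforced by the SemaChecking change below.
vfloat32m1_t add_rdn(vfloat32m1_t a, vfloat32m1_t b, size_t vl) {
  return __riscv_vfadd_vv_f32m1_rm(a, b, __RISCV_FRM_RDN, vl);
}
```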
diff --git a/clang/include/clang/Basic/riscv_vector_common.td b/clang/include/clang/Basic/riscv_vector_common.td
index f98432839f1d2..e276e4c3c409b 100644
--- a/clang/include/clang/Basic/riscv_vector_common.td
+++ b/clang/include/clang/Basic/riscv_vector_common.td
@@ -234,6 +234,10 @@ class RVVBuiltin<string suffix, string prototype, string type_range,
 
   // Set to true if the builtin is associated with tuple types.
   bool IsTuple = false;
+
+  // Set to true if the builtin has a parameter that models the rounding mode
+  // control.
+  bool HasFRMRoundModeOp = false;
 }
 
 class RVVHeader
diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ ... @@ class RVVIntrinsic {
   std::vector<int64_t> IntrinsicTypes;
   unsigned NF = 1;
   Policy PolicyAttrs;
+  bool HasFRMRoundModeOp;
 
 public:
   RVVIntrinsic(llvm::StringRef Name, llvm::StringRef Suffix,
@@ -397,7 +398,7 @@ class RVVIntrinsic {
                const RVVTypes &Types,
                const std::vector<int64_t> &IntrinsicTypes,
                const std::vector<llvm::StringRef> &RequiredFeatures,
-               unsigned NF, Policy PolicyAttrs);
+               unsigned NF, Policy PolicyAttrs, bool HasFRMRoundModeOp);
   ~RVVIntrinsic() = default;
 
   RVVTypePtr getOutputType() const { return OutputType; }
@@ -467,7 +468,7 @@ class RVVIntrinsic {
   static void updateNamesAndPolicy(bool IsMasked, bool HasPolicy,
                                    std::string &Name, std::string &BuiltinName,
                                    std::string &OverloadedName,
-                                   Policy &PolicyAttrs);
+                                   Policy &PolicyAttrs, bool HasFRMRoundModeOp);
 };
 
 // RVVRequire should be sync'ed with target features, but only
@@ -526,6 +527,7 @@ struct RVVIntrinsicRecord {
   bool HasMaskedOffOperand : 1;
   bool HasTailPolicy : 1;
   bool HasMaskPolicy : 1;
+  bool HasFRMRoundModeOp : 1;
   bool IsTuple : 1;
   uint8_t UnMaskedPolicyScheme : 2;
   uint8_t MaskedPolicyScheme : 2;
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 3284cf1b4f91b..7f0fed46083a1 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -4808,6 +4808,21 @@ bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
   case RISCVVector::BI__builtin_rvv_vnclipu_wv_tumu:
   case RISCVVector::BI__builtin_rvv_vnclipu_wx_tumu:
     return SemaBuiltinConstantArgRange(TheCall, 4, 0, 3);
+  case RISCVVector::BI__builtin_rvv_vfadd_vv_rm:
+  case RISCVVector::BI__builtin_rvv_vfadd_vf_rm:
+    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 4);
+  case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tu:
+  case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tu:
+  case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tama:
+  case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tama:
+    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 4);
+  case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tum:
+  case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tum:
+  case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tumu:
+  case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tumu:
+  case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_mu:
+  case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_mu:
+    return SemaBuiltinConstantArgRange(TheCall, 4, 0, 4);
   case RISCV::BI__builtin_riscv_ntl_load:
   case RISCV::BI__builtin_riscv_ntl_store:
     DeclRefExpr *DRE =
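The three argument indices in the SemaChecking cases above track where the constant frm lands once mask and merge operands are prepended to the call. A sketch following the usual intrinsic naming scheme (the `_rm_tum` signature belongs to the policy tests, which are not part of this hunk, so treat it as an assumption):

```c
#include <riscv_vector.h>

// frm at index 2: (op1, op2, frm, vl).
vfloat32m1_t ex_rm(vfloat32m1_t a, vfloat32m1_t b, size_t vl) {
  return __riscv_vfadd_vv_f32m1_rm(a, b, __RISCV_FRM_RNE, vl);
}

// frm at index 3: one operand is prepended -- a mask here (_m/tama), or a
// tail-undisturbed merge for _tu: (mask, op1, op2, frm, vl).
vfloat32m1_t ex_rm_m(vbool32_t m, vfloat32m1_t a, vfloat32m1_t b, size_t vl) {
  return __riscv_vfadd_vv_f32m1_rm_m(m, a, b, __RISCV_FRM_RNE, vl);
}

// frm at index 4: both mask and merge are prepended (_tum/_tumu/_mu):
// (mask, merge, op1, op2, frm, vl).
vfloat32m1_t ex_rm_tum(vbool32_t m, vfloat32m1_t merge, vfloat32m1_t a,
                       vfloat32m1_t b, size_t vl) {
  return __riscv_vfadd_vv_f32m1_rm_tum(m, merge, a, b, __RISCV_FRM_RNE, vl);
}
```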
diff --git a/clang/lib/Sema/SemaRISCVVectorLookup.cpp b/clang/lib/Sema/SemaRISCVVectorLookup.cpp
index fc3086c7d78f0..fbed6e8d26cbc 100644
--- a/clang/lib/Sema/SemaRISCVVectorLookup.cpp
+++ b/clang/lib/Sema/SemaRISCVVectorLookup.cpp
@@ -362,7 +362,8 @@ void RISCVIntrinsicManagerImpl::InitRVVIntrinsic(
   std::string BuiltinName = "__builtin_rvv_" + std::string(Record.Name);
 
   RVVIntrinsic::updateNamesAndPolicy(IsMasked, HasPolicy, Name, BuiltinName,
-                                     OverloadedName, PolicyAttrs);
+                                     OverloadedName, PolicyAttrs,
+                                     Record.HasFRMRoundModeOp);
 
   // Put into IntrinsicList.
   size_t Index = IntrinsicList.size();
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index cba4ab7c67588..12b9ca38faa0f 100644
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -875,20 +875,19 @@ std::optional<RVVTypePtr> RVVTypeCache::computeType(BasicType BT, int Log2LMUL,
 //===----------------------------------------------------------------------===//
 // RVVIntrinsic implementation
 //===----------------------------------------------------------------------===//
-RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
-                           StringRef NewOverloadedName,
-                           StringRef OverloadedSuffix, StringRef IRName,
-                           bool IsMasked, bool HasMaskedOffOperand, bool HasVL,
-                           PolicyScheme Scheme, bool SupportOverloading,
-                           bool HasBuiltinAlias, StringRef ManualCodegen,
-                           const RVVTypes &OutInTypes,
-                           const std::vector<int64_t> &NewIntrinsicTypes,
-                           const std::vector<StringRef> &RequiredFeatures,
-                           unsigned NF, Policy NewPolicyAttrs)
+RVVIntrinsic::RVVIntrinsic(
+    StringRef NewName, StringRef Suffix, StringRef NewOverloadedName,
+    StringRef OverloadedSuffix, StringRef IRName, bool IsMasked,
+    bool HasMaskedOffOperand, bool HasVL, PolicyScheme Scheme,
+    bool SupportOverloading, bool HasBuiltinAlias, StringRef ManualCodegen,
+    const RVVTypes &OutInTypes, const std::vector<int64_t> &NewIntrinsicTypes,
+    const std::vector<StringRef> &RequiredFeatures, unsigned NF,
+    Policy NewPolicyAttrs, bool HasFRMRoundModeOp)
     : IRName(IRName), IsMasked(IsMasked),
       HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL), Scheme(Scheme),
       SupportOverloading(SupportOverloading), HasBuiltinAlias(HasBuiltinAlias),
-      ManualCodegen(ManualCodegen.str()), NF(NF), PolicyAttrs(NewPolicyAttrs) {
+      ManualCodegen(ManualCodegen.str()), NF(NF), PolicyAttrs(NewPolicyAttrs),
+      HasFRMRoundModeOp(HasFRMRoundModeOp) {
 
   // Init BuiltinName, Name and OverloadedName
   BuiltinName = NewName.str();
@@ -903,7 +902,7 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
     OverloadedName += "_" + OverloadedSuffix.str();
 
   updateNamesAndPolicy(IsMasked, hasPolicy(), Name, BuiltinName, OverloadedName,
-                       PolicyAttrs);
+                       PolicyAttrs, HasFRMRoundModeOp);
 
   // Init OutputType and InputTypes
   OutputType = OutInTypes[0];
@@ -1045,11 +1044,9 @@ RVVIntrinsic::getSupportedMaskedPolicies(bool HasTailPolicy,
          "and mask policy");
 }
 
-void RVVIntrinsic::updateNamesAndPolicy(bool IsMasked, bool HasPolicy,
-                                        std::string &Name,
-                                        std::string &BuiltinName,
-                                        std::string &OverloadedName,
-                                        Policy &PolicyAttrs) {
+void RVVIntrinsic::updateNamesAndPolicy(
+    bool IsMasked, bool HasPolicy, std::string &Name, std::string &BuiltinName,
+    std::string &OverloadedName, Policy &PolicyAttrs, bool HasFRMRoundModeOp) {
 
   auto appendPolicySuffix = [&](const std::string &suffix) {
     Name += suffix;
@@ -1062,6 +1059,11 @@ void RVVIntrinsic::updateNamesAndPolicy(bool IsMasked, bool HasPolicy,
   Name = "__riscv_" + Name;
   OverloadedName = "__riscv_" + OverloadedName;
 
+  if (HasFRMRoundModeOp) {
+    Name += "_rm";
+    BuiltinName += "_rm";
+  }
+
   if (IsMasked) {
     if (PolicyAttrs.isTUMUPolicy())
       appendPolicySuffix("_tumu");
@@ -1131,6 +1133,7 @@ raw_ostream &operator<<(raw_ostream &OS, const RVVIntrinsicRecord &Record) {
   OS << (int)Record.HasMaskedOffOperand << ",";
   OS << (int)Record.HasTailPolicy << ",";
   OS << (int)Record.HasMaskPolicy << ",";
+  OS << (int)Record.HasFRMRoundModeOp << ",";
   OS << (int)Record.IsTuple << ",";
   OS << (int)Record.UnMaskedPolicyScheme << ",";
   OS << (int)Record.MaskedPolicyScheme << ",";
diff --git
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c index d9ec3d8a2cf77..339325b958db4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -50,7 +50,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -60,7 +60,7 @@ 
vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -70,7 +70,7 @@ vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -80,7 +80,7 @@ vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -90,7 +90,7 @@ vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -100,7 +100,7 @@ vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -110,7 +110,7 @@ vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -120,7 +120,7 @@ vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -130,7 +130,7 @@ vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -140,7 +140,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -150,7 +150,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -160,7 +160,7 @@ vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -170,7 +170,7 @@ vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // 
CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -180,7 +180,7 @@ vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -190,7 +190,7 @@ vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -200,7 +200,7 @@ vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -210,7 +210,7 @@ vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -220,7 +220,7 @@ vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -230,7 +230,7 @@ vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -240,7 +240,7 @@ vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -250,7 +250,7 @@ vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -260,7 +260,7 @@ vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -270,7 +270,7 @@ vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -280,7 +280,7 @@ vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4 // CHECK-RV64-SAME: 
( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -290,7 +290,7 @@ vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -300,7 +300,7 @@ vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -310,7 +310,7 @@ vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -320,7 +320,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -330,7 +330,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( 
poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -340,7 +340,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -350,7 +350,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -360,7 +360,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -370,7 +370,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -380,7 +380,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -430,7 +430,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -440,7 +440,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { @@ -450,7 +450,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -460,7 +460,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { @@ -470,7 +470,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -480,7 +480,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -500,7 +500,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { @@ -510,7 +510,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, s // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -520,7 +520,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { @@ -530,7 +530,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, s // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -540,7 +540,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { @@ -550,7 +550,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -560,7 +560,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { @@ -570,7 +570,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -580,7 +580,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -600,10 +600,610 @@ vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { return __riscv_vfadd_vf_f64m8_m(mask, op1, op2, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vv_f16mf4_rm(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vf_f16mf4_rm(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vv_f16mf2_rm(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vf_f16mf2_rm(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vv_f16m1_rm(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vf_f16m1_rm(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vv_f16m2_rm(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vf_f16m2_rm(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vv_f16m4_rm(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vf_f16m4_rm(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfadd_vv_f16m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vv_f16m8_rm(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vf_f16m8_rm(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_rm(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_rm(vfloat32mf2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_rm(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_rm(vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_rm(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfadd_vf_f32m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_rm(vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_rm(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_rm(vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_rm(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_rm(vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_rm(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_rm(vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_rm 
+// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_rm(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_rm(vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_rm(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_rm(vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_rm(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8_rm(vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfadd_vf_f16mf4_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( poison, 
[[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_rm_m(vbool8_t mask, 
vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfadd_vv_f64m4_rm_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_rm_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_rm_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_rm_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c
index 4de415e21b99e..ad850f48c7d6c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4
 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4
 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call
@llvm.riscv.vfadd.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -50,7 +50,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -70,7 +70,7 @@ vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -80,7 +80,7 @@ vfloat16m2_t 
test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -90,7 +90,7 @@ vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -100,7 +100,7 @@ vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -110,7 +110,7 @@ vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -120,7 +120,7 @@ vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -130,7 +130,7 @@ vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -140,7 +140,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2 // CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -150,7 +150,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -160,7 +160,7 @@ vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -170,7 +170,7 @@ vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -180,7 +180,7 @@ vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -190,7 +190,7 @@ vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: 
define dso_local @test_vfadd_vv_f32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -200,7 +200,7 @@ vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -210,7 +210,7 @@ vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -220,7 +220,7 @@ vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -230,7 +230,7 @@ vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -240,7 +240,7 @@ vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( poison, 
[[OP1]], double [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -250,7 +250,7 @@ vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -260,7 +260,7 @@ vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -270,7 +270,7 @@ vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -280,7 +280,7 @@ vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -290,7 +290,7 @@ vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -300,7 +300,7 @@ vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -310,7 +310,7 @@ vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -320,7 +320,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -330,7 +330,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -340,7 +340,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -350,7 +350,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -360,7 +360,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -370,7 +370,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -380,7 +380,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], 
half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -430,7 +430,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -440,7 +440,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { @@ -450,7 +450,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_m 
// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -460,7 +460,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { @@ -470,7 +470,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -480,7 +480,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -500,7 +500,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t // CHECK-RV64-LABEL: define dso_local 
@test_vfadd_vf_f32m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { @@ -510,7 +510,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, s // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -520,7 +520,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { @@ -530,7 +530,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, s // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -540,7 +540,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { @@ -550,7 +550,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double 
op2, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -560,7 +560,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { @@ -570,7 +570,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -580,7 +580,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -600,10 +600,610 @@ vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t 
mask, vfloat64m8_t op1, vfloat64m8_t // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_m // CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { return __riscv_vfadd(mask, op1, op2, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vv_f16mf4_rm(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vf_f16mf4_rm(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vv_f16mf2_rm(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vf_f16mf2_rm(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vv_f16m1_rm(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t 
test_vfadd_vf_f16m1_rm(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vv_f16m2_rm(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vf_f16m2_rm(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vv_f16m4_rm(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vf_f16m4_rm(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vv_f16m8_rm(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vf_f16m8_rm(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_rm(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, 
__RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_rm(vfloat32mf2_t op1, float op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_rm(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_rm(vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_rm(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_rm(vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_rm(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_rm(vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_rm +// CHECK-RV64-SAME: ( 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_rm(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_rm(vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_rm(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_rm(vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_rm(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_rm(vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_rm(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_rm(vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_rm(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_rm +// CHECK-RV64-SAME: ( [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8_rm(vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfadd_vv_f16m1_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, 
vfloat32m2_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_rm_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd.c index 27e807f6252ec..343c8c098c9b8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd.c @@ -10,7 +10,7 @@ // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -50,7 +50,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -70,7 +70,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _F // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -80,7 +80,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -90,7 +90,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _F // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -100,7 +100,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -110,7 +110,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _F // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -120,7 +120,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -130,7 +130,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _F // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -140,7 +140,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { @@ -150,7 +150,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -160,7 +160,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { @@ -170,7 +170,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, fl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -180,7 +180,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { @@ -190,7 +190,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, fl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -200,7 +200,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { @@ -210,7 +210,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, fl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -220,7 +220,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { @@ -230,7 +230,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, fl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -240,7 +240,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { @@ -250,7 +250,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, do // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -260,7 +260,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { @@ -270,7 +270,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, do // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -280,7 +280,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { @@ -290,7 +290,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, do // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -300,7 +300,7 @@ vfloat64m8_t test_vfadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfadd_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
[Hunks @@ -310,7 +310,7 @@ through @@ -1160,7 +1160,7 @@ elided: mechanically identical updates to the remaining masked vfadd checks (test_vfadd_vv_* and test_vfadd_vf_* in the _tum, _tumu, and _mu variants, across f16/f32/f64 element types at all supported LMULs from mf4 through m8). In each hunk the expected @llvm.riscv.vfadd.mask call gains the operand i64 7 (dynamic rounding mode) immediately before i64 [[VL]], while the trailing policy operand is unchanged: i64 2 for _tum, i64 0 for _tumu, i64 1 for _mu.]
double op2, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -1200,10 +1200,1210 @@ vfloat64m8_t test_vfadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloa // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { return __riscv_vfadd_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vv_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vv_f16m2_rm_tu(vfloat16m2_t 
maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vv_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vv_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return 
__riscv_vfadd_vv_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfadd_vf_f32m4_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { 
+ return __riscv_vfadd_vv_f16m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); 
+} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_rm_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vv_f16m1_rm_tumu(vbool16_t 
mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + 
return __riscv_vfadd_vf_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + 
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_rm_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], 
i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t 
maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfadd_vf_f64m2_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c index 7e140beb76250..025f773c27fc1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c @@ -10,7 +10,7 @@ // 
CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -20,7 +20,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -30,7 +30,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -50,7 +50,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -60,7 +60,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vf // CHECK-RV64-LABEL: define 
dso_local @test_vfadd_vf_f16m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -70,7 +70,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _F // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -80,7 +80,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -90,7 +90,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _F // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -100,7 +100,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -110,7 +110,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _F // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_tu // 
CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -120,7 +120,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -130,7 +130,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _F // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -140,7 +140,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { @@ -150,7 +150,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1 // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -160,7 +160,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_tu // CHECK-RV64-SAME: ( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { @@ -170,7 +170,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, fl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -180,7 +180,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { @@ -190,7 +190,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, fl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -200,7 +200,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { @@ -210,7 +210,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, fl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -220,7 +220,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { @@ -230,7 +230,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, fl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -240,7 +240,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { @@ -250,7 +250,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, do // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -260,7 +260,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { @@ -270,7 +270,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, do // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -280,7 +280,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { @@ -290,7 +290,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, do // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -300,7 +300,7 @@ vfloat64m8_t test_vfadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_tu // CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { @@ -310,7 +310,7 @@ vfloat64m8_t test_vfadd_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, do // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -320,7 +320,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -330,7 +330,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -340,7 +340,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -350,7 +350,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t 
maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -360,7 +360,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -370,7 +370,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -380,7 +380,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -390,7 +390,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -400,7 +400,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -410,7 +410,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -420,7 +420,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -430,7 +430,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -440,7 +440,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { @@ -450,7 
+450,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -460,7 +460,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { @@ -470,7 +470,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -480,7 +480,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { @@ -490,7 +490,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -500,7 +500,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { @@ -510,7 +510,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -520,7 +520,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { @@ -530,7 +530,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -540,7 +540,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-LABEL: 
define dso_local @test_vfadd_vf_f64m1_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { @@ -550,7 +550,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -560,7 +560,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { @@ -570,7 +570,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -580,7 +580,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( 
[[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { @@ -590,7 +590,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -600,7 +600,7 @@ vfloat64m8_t test_vfadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_tum // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { @@ -610,7 +610,7 @@ vfloat64m8_t test_vfadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -620,7 +620,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -630,7 +630,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_tumu // CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -640,7 +640,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -650,7 +650,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -660,7 +660,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -670,7 +670,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -680,7 +680,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -690,7 +690,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -700,7 +700,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -710,7 +710,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -720,7 +720,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -730,7 +730,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -740,7 +740,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { @@ -750,7 +750,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -760,7 +760,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vfadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { @@ -770,7 +770,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -780,7 +780,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { @@ -790,7 +790,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -800,7 +800,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { @@ -810,7 +810,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -820,7 +820,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { @@ -830,7 +830,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -840,7 +840,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { @@ -850,7 +850,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t 
op2, size_t vl) { @@ -860,7 +860,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { @@ -870,7 +870,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -880,7 +880,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { @@ -890,7 +890,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -900,7 +900,7 @@ vfloat64m8_t test_vfadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_tumu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( 
[[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { @@ -910,7 +910,7 @@ vfloat64m8_t test_vfadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -920,7 +920,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -930,7 +930,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -940,7 +940,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -950,7 +950,7 @@ vfloat16mf2_t 
test_vfadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -960,7 +960,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -970,7 +970,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -980,7 +980,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -990,7 +990,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -1000,7 +1000,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -1010,7 +1010,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -1020,7 +1020,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -1030,7 +1030,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -1040,7 +1040,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v // CHECK-RV64-LABEL: define 
dso_local @test_vfadd_vf_f32mf2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { @@ -1050,7 +1050,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -1060,7 +1060,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { @@ -1070,7 +1070,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -1080,7 +1080,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], 
[[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { @@ -1090,7 +1090,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -1100,7 +1100,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { @@ -1110,7 +1110,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -1120,7 +1120,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { @@ -1130,7 +1130,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -1140,7 +1140,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { @@ -1150,7 +1150,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -1160,7 +1160,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { @@ -1170,7 +1170,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m4_t test_vfadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -1180,7 +1180,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { @@ -1190,7 +1190,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -1200,10 +1200,1210 @@ vfloat64m8_t test_vfadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloa // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_mu // CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, 
vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vv_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vv_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfadd_vv_f16m4_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vv_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vv_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_rm_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_rm_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_rm_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_rm_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_rm_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_rm_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_rm_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_rm_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_rm_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_rm_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_rm_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_rm_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_rm_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_rm_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_rm_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_rm_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_rm_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_rm_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf4_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf4_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16mf2_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16mf2_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32mf2_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32mf2_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_rm_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1)
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_rm_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfadd-out-of-range.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfadd-out-of-range.c new file mode 100644 index 0000000000000..1c1fbbe19eba3 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfadd-out-of-range.c @@ -0,0 +1,67 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \ +// RUN: -fsyntax-only -verify %s + +#include + +vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}} + return __riscv_vfadd_vv_f32m1_rm(op1, op2, 5, vl); +} + +vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}} + return __riscv_vfadd_vf_f32m1_rm(op1, op2, 5, vl); +} + +vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}} + return __riscv_vfadd_vv_f32m1_rm_m(mask, op1, op2, 5, vl); +} + +vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}} + return __riscv_vfadd_vf_f32m1_rm_m(mask, op1, op2, 5, vl); +} + +vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}} + return __riscv_vfadd_vv_f32m1_rm_tu(maskedoff, op1, op2, 5, vl); +} + +vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}} + return __riscv_vfadd_vf_f32m1_rm_tu(maskedoff, op1, op2, 5, vl); +} + +vfloat32m1_t test_vfadd_vv_f32m1_tum( + vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}} + return __riscv_vfadd_vv_f32m1_rm_tum(mask, maskedoff, op1, op2, 5, vl); +} + +vfloat32m1_t test_vfadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}} + return 
__riscv_vfadd_vf_f32m1_rm_tum(mask, maskedoff, op1, op2, 5, vl); +} + +vfloat32m1_t test_vfadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}} + return __riscv_vfadd_vv_f32m1_rm_tumu(mask, maskedoff, op1, op2, 5, vl); +} + +vfloat32m1_t test_vfadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}} + return __riscv_vfadd_vf_f32m1_rm_tumu(mask, maskedoff, op1, op2, 5, vl); +} + +vfloat32m1_t test_vfadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}} + return __riscv_vfadd_vv_f32m1_rm_mu(mask, maskedoff, op1, op2, 5, vl); +} + +vfloat32m1_t test_vfadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}} + return __riscv_vfadd_vf_f32m1_rm_mu(mask, maskedoff, op1, op2, 5, vl); +} diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp index f7ef8c5d167fe..a9349f1fdc1c7 100644 --- a/clang/utils/TableGen/RISCVVEmitter.cpp +++ b/clang/utils/TableGen/RISCVVEmitter.cpp @@ -65,6 +65,7 @@ struct SemaRecord { bool HasMaskedOffOperand :1; bool HasTailPolicy : 1; bool HasMaskPolicy : 1; + bool HasFRMRoundModeOp : 1; bool IsTuple : 1; uint8_t UnMaskedPolicyScheme : 2; uint8_t MaskedPolicyScheme : 2; @@ -530,6 +531,7 @@ void RVVEmitter::createRVVIntrinsics( StringRef MaskedIRName = R->getValueAsString("MaskedIRName"); unsigned NF = R->getValueAsInt("NF"); bool IsTuple = R->getValueAsBit("IsTuple"); + bool HasFRMRoundModeOp = R->getValueAsBit("HasFRMRoundModeOp"); const Policy DefaultPolicy; SmallVector SupportedUnMaskedPolicies = @@ -577,7 +579,7 @@ void RVVEmitter::createRVVIntrinsics( /*IsMasked=*/false, /*HasMaskedOffOperand=*/false, HasVL, UnMaskedPolicyScheme, SupportOverloading, HasBuiltinAlias, ManualCodegen, *Types, IntrinsicTypes, RequiredFeatures, NF, - DefaultPolicy)); + DefaultPolicy, HasFRMRoundModeOp)); if (UnMaskedPolicyScheme != PolicyScheme::SchemeNone) for (auto P : SupportedUnMaskedPolicies) { SmallVector PolicyPrototype = @@ -592,7 +594,7 @@ void RVVEmitter::createRVVIntrinsics( /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, UnMaskedPolicyScheme, SupportOverloading, HasBuiltinAlias, ManualCodegen, *PolicyTypes, IntrinsicTypes, RequiredFeatures, - NF, P)); + NF, P, HasFRMRoundModeOp)); } if (!HasMasked) continue; @@ -603,7 +605,8 @@ void RVVEmitter::createRVVIntrinsics( Name, SuffixStr, OverloadedName, OverloadedSuffixStr, MaskedIRName, /*IsMasked=*/true, HasMaskedOffOperand, HasVL, MaskedPolicyScheme, SupportOverloading, HasBuiltinAlias, ManualCodegen, *MaskTypes, - IntrinsicTypes, RequiredFeatures, NF, DefaultPolicy)); + IntrinsicTypes, RequiredFeatures, NF, DefaultPolicy, + HasFRMRoundModeOp)); if (MaskedPolicyScheme == PolicyScheme::SchemeNone) continue; for (auto P : SupportedMaskedPolicies) { @@ -618,7 +621,7 @@ void RVVEmitter::createRVVIntrinsics( MaskedIRName, /*IsMasked=*/true, HasMaskedOffOperand, HasVL, MaskedPolicyScheme, SupportOverloading, HasBuiltinAlias, ManualCodegen, *PolicyTypes, IntrinsicTypes, RequiredFeatures, NF, - P)); + P, HasFRMRoundModeOp)); } } // End for Log2LMULList } // End for TypeRange @@ -671,6 +674,7 @@ void 
RVVEmitter::createRVVIntrinsics( SR.Suffix = parsePrototypes(SuffixProto); SR.OverloadedSuffix = parsePrototypes(OverloadedSuffixProto); SR.IsTuple = IsTuple; + SR.HasFRMRoundModeOp = HasFRMRoundModeOp; SemaRecords->push_back(SR); } @@ -713,6 +717,7 @@ void RVVEmitter::createRVVIntrinsicRecords(std::vector &Out, R.UnMaskedPolicyScheme = SR.UnMaskedPolicyScheme; R.MaskedPolicyScheme = SR.MaskedPolicyScheme; R.IsTuple = SR.IsTuple; + R.HasFRMRoundModeOp = SR.HasFRMRoundModeOp; assert(R.PrototypeIndex != static_cast(SemaSignatureTable::INVALID_INDEX)); diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td index a9cd18dd91ce1..544f5467b2793 100644 --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -420,6 +420,27 @@ let TargetPrefix = "riscv" in { let ScalarOperand = 2; let VLOperand = 4; } + // For destination vector type is the same as first source vector. + // Input: (passthru, vector_in, vector_in/scalar_in, frm, vl) + class RISCVBinaryAAXUnMaskedRoundingMode + : DefaultAttrsIntrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + llvm_anyint_ty, LLVMMatchType<2>], + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { + let ScalarOperand = 2; + let VLOperand = 4; + } + // For destination vector type is the same as first source vector (with mask). + // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, frm, vl, policy) + class RISCVBinaryAAXMaskedRoundingMode + : DefaultAttrsIntrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, + LLVMMatchType<2>, LLVMMatchType<2>], + [ImmArg>, ImmArg>, IntrNoMem]>, RISCVVIntrinsic { + let ScalarOperand = 2; + let VLOperand = 5; + } // For destination vector type is the same as first source vector. The // second source operand must match the destination type or be an XLen scalar. // Input: (passthru, vector_in, vector_in/scalar_in, vl) @@ -1088,6 +1109,10 @@ let TargetPrefix = "riscv" in { def "int_riscv_" # NAME : RISCVBinaryAAXUnMasked; def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMasked; } + multiclass RISCVBinaryAAXRoundingMode { + def "int_riscv_" # NAME : RISCVBinaryAAXUnMaskedRoundingMode; + def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMaskedRoundingMode; + } // Like RISCVBinaryAAX, but the second operand is used a shift amount so it // must be a vector or an XLen scalar. 
multiclass RISCVBinaryAAShift {
@@ -1296,7 +1321,7 @@ let TargetPrefix = "riscv" in {
   defm vwmaccus : RISCVTernaryWide;
   defm vwmaccsu : RISCVTernaryWide;
 
-  defm vfadd : RISCVBinaryAAX;
+  defm vfadd : RISCVBinaryAAXRoundingMode;
   defm vfsub : RISCVBinaryAAX;
   defm vfrsub : RISCVBinaryAAX;
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
index 0584b4835f1b3..7269f95a4816d 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -110,6 +110,9 @@ enum {
 
   HasRoundModeOpShift = IsSignExtendingOpWShift + 1,
   HasRoundModeOpMask = 1 << HasRoundModeOpShift,
+
+  UsesVXRMShift = HasRoundModeOpShift + 1,
+  UsesVXRMMask = 1 << UsesVXRMShift,
 };
 
 enum VLMUL : uint8_t {
@@ -172,6 +175,9 @@ static inline bool hasRoundModeOp(uint64_t TSFlags) {
   return TSFlags & HasRoundModeOpMask;
 }
 
+/// \returns true if this instruction uses vxrm
+static inline bool usesVXRM(uint64_t TSFlags) { return TSFlags & UsesVXRMMask; }
+
 static inline unsigned getVLOpNum(const MCInstrDesc &Desc) {
   const uint64_t TSFlags = Desc.TSFlags;
   // This method is only called if we expect to have a VL operand, and all
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 03191f704f2dd..f7844a2facb91 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -11,6 +11,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "RISCVISelDAGToDAG.h"
+#include "MCTargetDesc/RISCVBaseInfo.h"
 #include "MCTargetDesc/RISCVMCTargetDesc.h"
 #include "MCTargetDesc/RISCVMatInt.h"
 #include "RISCVISelLowering.h"
@@ -3336,8 +3337,21 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
     Ops.append(True->op_begin() + TrueVLIndex + 3, True->op_end());
   } else {
     Ops.push_back(False);
-    Ops.append(True->op_begin() + HasTiedDest, True->op_begin() + TrueVLIndex);
-    Ops.append({Mask, VL, /* SEW */ True.getOperand(TrueVLIndex + 1)});
+    if (RISCVII::hasRoundModeOp(TrueTSFlags)) {
+      // For an unmasked "VOp" with a rounding mode operand, that is, an
+      // interface like (..., rm, vl) or (..., rm, vl, policy), the masked
+      // version is (..., vm, rm, vl, policy). See the rounding mode pseudo
+      // nodes in RISCVInstrInfoVPseudos.td.
+      SDValue RoundMode = True->getOperand(TrueVLIndex - 1);
+      Ops.append(True->op_begin() + HasTiedDest,
+                 True->op_begin() + TrueVLIndex - 1);
+      Ops.append(
+          {Mask, RoundMode, VL, /* SEW */ True.getOperand(TrueVLIndex + 1)});
+    } else {
+      Ops.append(True->op_begin() + HasTiedDest,
+                 True->op_begin() + TrueVLIndex);
+      Ops.append({Mask, VL, /* SEW */ True.getOperand(TrueVLIndex + 1)});
+    }
     Ops.push_back(PolicyOp);
 
   // Result node should have chain operand of True.
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index e98e37d934706..d790a4abd0cc5 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -14148,8 +14148,33 @@ RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   }
 }
 
+// Returns the index of the rounding mode immediate value if there is one;
+// otherwise returns std::nullopt.
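+// Worked example (for illustration): a masked PseudoVFADD_VV_M1_MASK has the
+// explicit operands (vd, merge, rs2, rs1, v0, frm, vl, sew, policy), i.e.
+// nine explicit operands including a policy operand, so the frm immediate is
+// at index 9 - 1 - 3 = 5; the unmasked form has no v0 operand and yields
+// index 8 - 1 - 3 = 4.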
+static std::optional<unsigned> getRoundModeIdx(const MachineInstr &MI) {
+  uint64_t TSFlags = MI.getDesc().TSFlags;
+  if (!RISCVII::hasRoundModeOp(TSFlags))
+    return std::nullopt;
+
+  // The operand order:
+  // -------------------------------------
+  // | n-1 (if any) | n-2 | n-3 | n-4 |
+  // |    policy    | sew | vl  | rm  |
+  // -------------------------------------
+  return MI.getNumExplicitOperands() - RISCVII::hasVecPolicyOp(TSFlags) - 3;
+}
+
 void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                         SDNode *Node) const {
+  // Add an FRM dependency to vector floating-point instructions with dynamic
+  // rounding mode.
+  if (auto RoundModeIdx = getRoundModeIdx(MI)) {
+    unsigned FRMImm = MI.getOperand(*RoundModeIdx).getImm();
+    if (FRMImm == RISCVFPRndMode::DYN && !MI.readsRegister(RISCV::FRM)) {
+      MI.addOperand(MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false,
+                                              /*isImp*/ true));
+    }
+  }
+
   // Add FRM dependency to any instructions with dynamic rounding mode.
   unsigned Opc = MI.getOpcode();
   auto Idx = RISCV::getNamedOperandIdx(Opc, RISCV::OpName::frm);
diff --git a/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp b/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
index 7d3d665808b46..4b26c27bb4f8e 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
@@ -13,6 +13,7 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "MCTargetDesc/RISCVBaseInfo.h"
 #include "RISCV.h"
 #include "RISCVSubtarget.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
@@ -45,8 +46,7 @@ class RISCVInsertReadWriteCSR : public MachineFunctionPass {
   }
 
 private:
-  bool emitWriteVXRM(MachineBasicBlock &MBB);
-  std::optional<unsigned> getRoundModeIdx(const MachineInstr &MI);
+  bool emitWriteRoundingMode(MachineBasicBlock &MBB);
 };
 
 } // end anonymous namespace
 
@@ -56,10 +56,9 @@ char RISCVInsertReadWriteCSR::ID = 0;
 INITIALIZE_PASS(RISCVInsertReadWriteCSR, DEBUG_TYPE,
                 RISCV_INSERT_READ_WRITE_CSR_NAME, false, false)
 
-// This function returns the index to the rounding mode immediate value if any,
-// otherwise the function will return None.
-std::optional<unsigned>
-RISCVInsertReadWriteCSR::getRoundModeIdx(const MachineInstr &MI) {
+// Returns the index of the rounding mode immediate value if there is one;
+// otherwise returns std::nullopt.
+static std::optional<unsigned> getRoundModeIdx(const MachineInstr &MI) {
   uint64_t TSFlags = MI.getDesc().TSFlags;
   if (!RISCVII::hasRoundModeOp(TSFlags))
     return std::nullopt;
@@ -74,17 +73,42 @@ RISCVInsertReadWriteCSR::getRoundModeIdx(const MachineInstr &MI)
 
 // This function inserts a write to vxrm when encountering an RVV fixed-point
 // instruction.
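+// For floating-point pseudos with a static rounding mode the rewritten pass
+// instead swaps frm. Illustrative effect (register choice is up to the
+// allocator), matching the fsrmi/fsrm pairs in the updated vfadd.ll checks:
+//
+//   fsrmi a0, 0          # save the old frm into a0, set frm to 0 (RNE)
+//   vfadd.vv v8, v8, v9
+//   fsrm a0              # restore the caller's frm
+//
+// A frm immediate of 7 (RISCVFPRndMode::DYN) is a hint to leave frm alone.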
-bool RISCVInsertReadWriteCSR::emitWriteVXRM(MachineBasicBlock &MBB) { +bool RISCVInsertReadWriteCSR::emitWriteRoundingMode(MachineBasicBlock &MBB) { bool Changed = false; for (MachineInstr &MI : MBB) { if (auto RoundModeIdx = getRoundModeIdx(MI)) { - Changed = true; - - unsigned VXRMImm = MI.getOperand(*RoundModeIdx).getImm(); - BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(RISCV::WriteVXRMImm)) - .addImm(VXRMImm); - MI.addOperand(MachineOperand::CreateReg(RISCV::VXRM, /*IsDef*/ false, - /*IsImp*/ true)); + if (RISCVII::usesVXRM(MI.getDesc().TSFlags)) { + unsigned VXRMImm = MI.getOperand(*RoundModeIdx).getImm(); + + Changed = true; + + BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(RISCV::WriteVXRMImm)) + .addImm(VXRMImm); + MI.addOperand(MachineOperand::CreateReg(RISCV::VXRM, /*IsDef*/ false, + /*IsImp*/ true)); + } else { // FRM + unsigned FRMImm = MI.getOperand(*RoundModeIdx).getImm(); + + // The value is a hint to this pass to not alter the frm value. + if (FRMImm == RISCVFPRndMode::DYN) + continue; + + Changed = true; + + // Save + MachineRegisterInfo *MRI = &MBB.getParent()->getRegInfo(); + Register SavedFRM = MRI->createVirtualRegister(&RISCV::GPRRegClass); + BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(RISCV::SwapFRMImm), + SavedFRM) + .addImm(FRMImm); + MI.addOperand(MachineOperand::CreateReg(RISCV::FRM, /*IsDef*/ false, + /*IsImp*/ true)); + // Restore + MachineInstrBuilder MIB = + BuildMI(*MBB.getParent(), {}, TII->get(RISCV::WriteFRM)) + .addReg(SavedFRM); + MBB.insertAfter(MI, MIB); + } } } return Changed; @@ -101,7 +125,7 @@ bool RISCVInsertReadWriteCSR::runOnMachineFunction(MachineFunction &MF) { bool Changed = false; for (MachineBasicBlock &MBB : MF) - Changed |= emitWriteVXRM(MBB); + Changed |= emitWriteRoundingMode(MBB); return Changed; } diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td index ab77d971fb9e5..445dad757dc89 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td +++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td @@ -217,6 +217,14 @@ class RVInst : + string Constraint, + int UsesVXRM_ = 1> : Pseudo<(outs RetClass:$rd), (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, @@ -1106,12 +1107,14 @@ class VPseudoBinaryNoMaskRoundingMode : + string Constraint, + int UsesVXRM_> : Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, Op1Class:$rs2, Op2Class:$rs1, @@ -1126,6 +1129,7 @@ class VPseudoBinaryMaskPolicyRoundingMode { + int sew = 0, + int UsesVXRM = 1> { let VLMul = MInfo.value in { defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX); def suffix : VPseudoBinaryNoMaskRoundingMode; + Constraint, UsesVXRM>; def suffix # "_MASK" : VPseudoBinaryMaskPolicyRoundingMode, + Constraint, + UsesVXRM>, RISCVMaskedPseudo; } } + multiclass VPseudoBinaryM { defm _VV : VPseudoBinary; } +multiclass VPseudoBinaryFV_VV_RM { + defm _VV : VPseudoBinaryRoundingMode; +} + multiclass VPseudoVGTR_VV_EEW { foreach m = MxList in { defvar mx = m.MX; @@ -1982,6 +1995,12 @@ multiclass VPseudoBinaryV_VF; } +multiclass VPseudoBinaryV_VF_RM { + defm "_V" # f.FX : VPseudoBinaryRoundingMode; +} + multiclass VPseudoVSLD1_VF { foreach f = FPList in { foreach m = f.MxList in { @@ -2677,6 +2696,28 @@ multiclass VPseudoVALU_VV_VF { } } +multiclass VPseudoVALU_VV_VF_RM { + foreach m = MxListF in { + defvar mx = m.MX; + defvar WriteVFALUV_MX = !cast("WriteVFALUV_" # mx); + defvar ReadVFALUV_MX = !cast("ReadVFALUV_" # mx); + + defm "" : VPseudoBinaryFV_VV_RM, + 
Sched<[WriteVFALUV_MX, ReadVFALUV_MX, ReadVFALUV_MX, ReadVMask]>; + } + + foreach f = FPList in { + foreach m = f.MxList in { + defvar mx = m.MX; + defvar WriteVFALUF_MX = !cast("WriteVFALUF_" # mx); + defvar ReadVFALUV_MX = !cast("ReadVFALUV_" # mx); + defvar ReadVFALUF_MX = !cast("ReadVFALUF_" # mx); + defm "" : VPseudoBinaryV_VF_RM, + Sched<[WriteVFALUF_MX, ReadVFALUV_MX, ReadVFALUF_MX, ReadVMask]>; + } + } +} + multiclass VPseudoVALU_VF { foreach f = FPList in { foreach m = f.MxList in { @@ -5667,8 +5708,12 @@ let Predicates = [HasVInstructionsAnyF] in { //===----------------------------------------------------------------------===// // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions //===----------------------------------------------------------------------===// +let mayRaiseFPException = true, + hasPostISelHook = 1 in { +defm PseudoVFADD : VPseudoVALU_VV_VF_RM; +} + let Uses = [FRM], mayRaiseFPException = true in { -defm PseudoVFADD : VPseudoVALU_VV_VF; defm PseudoVFSUB : VPseudoVALU_VV_VF; defm PseudoVFRSUB : VPseudoVALU_VF; } @@ -6327,7 +6372,8 @@ defm : VPatBinaryV_WV_WX_WI_RM<"int_riscv_vnclip", "PseudoVNCLIP", //===----------------------------------------------------------------------===// // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions //===----------------------------------------------------------------------===// -defm : VPatBinaryV_VV_VX<"int_riscv_vfadd", "PseudoVFADD", AllFloatVectors>; +defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfadd", "PseudoVFADD", + AllFloatVectors>; defm : VPatBinaryV_VV_VX<"int_riscv_vfsub", "PseudoVFSUB", AllFloatVectors>; defm : VPatBinaryV_VX<"int_riscv_vfrsub", "PseudoVFRSUB", AllFloatVectors>; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td index 06eb8de9cc700..4bfa898af4af1 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td @@ -92,6 +92,30 @@ class VPatBinarySDNode_VV; +class VPatBinarySDNode_VV_RM : + Pat<(result_type (vop + (op_type op_reg_class:$rs1), + (op_type op_reg_class:$rs2))), + (!cast( + !if(isSEWAware, + instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew), + instruction_name#"_VV_"# vlmul.MX)) + (result_type (IMPLICIT_DEF)), + op_reg_class:$rs1, + op_reg_class:$rs2, + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + avl, log2sew, TA_MA)>; + class VPatBinarySDNode_XI; +class VPatBinarySDNode_VF_RM : + Pat<(result_type (vop (vop_type vop_reg_class:$rs1), + (vop_type (SplatFPOp xop_kind:$rs2)))), + (!cast( + !if(isSEWAware, + instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew), + instruction_name#"_"#vlmul.MX)) + (result_type (IMPLICIT_DEF)), + vop_reg_class:$rs1, + (xop_type xop_kind:$rs2), + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + avl, log2sew, TA_MA)>; + multiclass VPatBinaryFPSDNode_VV_VF { foreach vti = AllFloatVectors in { @@ -182,6 +231,21 @@ multiclass VPatBinaryFPSDNode_VV_VF { + foreach vti = AllFloatVectors in { + let Predicates = GetVTypePredicates.Predicates in { + def : VPatBinarySDNode_VV_RM; + def : VPatBinarySDNode_VF_RM; + } + } +} + multiclass VPatBinaryFPSDNode_R_VF { foreach fvti = AllFloatVectors in @@ -971,7 +1035,7 @@ foreach mti = AllMasks in { // 13. Vector Floating-Point Instructions // 13.2. 
Vector Single-Width Floating-Point Add/Subtract Instructions -defm : VPatBinaryFPSDNode_VV_VF; +defm : VPatBinaryFPSDNode_VV_VF_RM; defm : VPatBinaryFPSDNode_VV_VF; defm : VPatBinaryFPSDNode_R_VF; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td index 3092ba94fbe75..6b489883c0747 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -602,6 +602,38 @@ class VPatBinaryVL_V; +class VPatBinaryVL_V_RM + : Pat<(result_type (vop + (op1_type op1_reg_class:$rs1), + (op2_type op2_reg_class:$rs2), + (result_type result_reg_class:$merge), + (mask_type V0), + VLOpFrag)), + (!cast( + !if(isSEWAware, + instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", + instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK")) + result_reg_class:$merge, + op1_reg_class:$rs1, + op2_reg_class:$rs2, + (mask_type V0), + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + GPR:$vl, log2sew, TAIL_AGNOSTIC)>; + multiclass VPatTiedBinaryNoMaskVL_V; +class VPatBinaryVL_VF_RM + : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1), + (vop2_type (SplatFPOp scalar_reg_class:$rs2)), + (result_type result_reg_class:$merge), + (mask_type V0), + VLOpFrag)), + (!cast( + !if(isSEWAware, + instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", + instruction_name#"_"#vlmul.MX#"_MASK")) + result_reg_class:$merge, + vop_reg_class:$rs1, + scalar_reg_class:$rs2, + (mask_type V0), + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + GPR:$vl, log2sew, TAIL_AGNOSTIC)>; + multiclass VPatBinaryFPVL_VV_VF { foreach vti = AllFloatVectors in { @@ -801,6 +863,22 @@ multiclass VPatBinaryFPVL_VV_VF { + foreach vti = AllFloatVectors in { + let Predicates = GetVTypePredicates.Predicates in { + def : VPatBinaryVL_V_RM; + def : VPatBinaryVL_VF_RM; + } + } +} + multiclass VPatBinaryFPVL_R_VF { foreach fvti = AllFloatVectors in { @@ -1836,7 +1914,7 @@ defm : VPatBinaryVL_VV_VX; // 13. Vector Floating-Point Instructions // 13.2. 
Vector Single-Width Floating-Point Add/Subtract Instructions -defm : VPatBinaryFPVL_VV_VF; +defm : VPatBinaryFPVL_VV_VF_RM; defm : VPatBinaryFPVL_VV_VF; defm : VPatBinaryFPVL_R_VF; diff --git a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll index 80de207ccfd76..bf6293dbd4208 100644 --- a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll +++ b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll @@ -41,7 +41,7 @@ entry: %3 = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( poison, %1, - %2, i64 %vl) + %2, i64 7, i64 %vl) ret %3 } @@ -49,4 +49,4 @@ declare @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( , , , - i64) + i64, i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-vmv.ll b/llvm/test/CodeGen/RISCV/rvv/combine-vmv.ll index 36a2d45ffb4bd..e5cd47ee2ea8c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/combine-vmv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/combine-vmv.ll @@ -106,7 +106,7 @@ define @unfoldable_load( %passthru, ptr %p, declare @llvm.riscv.vmv.v.v.nxv4f32(, , iXLen) -declare @llvm.riscv.vfadd.nxv4f32.nxv4f32(, , , iXLen) +declare @llvm.riscv.vfadd.nxv4f32.nxv4f32(, , , iXLen, iXLen) define @vfadd( %passthru, %a, %b, iXLen %vl1, iXLen %vl2) { ; CHECK-LABEL: vfadd: @@ -116,7 +116,7 @@ define @vfadd( %passthru, @llvm.riscv.vfadd.nxv4f32.nxv4f32( poison, %a, %b, iXLen %vl1) + %v = call @llvm.riscv.vfadd.nxv4f32.nxv4f32( poison, %a, %b, iXLen 7, iXLen %vl1) %w = call @llvm.riscv.vmv.v.v.nxv4f32( %passthru, %v, iXLen %vl2) ret %w } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmf.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmf.ll index f10252b9618c8..895875518712e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmf.ll @@ -10,7 +10,7 @@ define <2 x double> @foo(<2 x double> %x, <2 x double> %y) { ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v9 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8 ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVFADD_VV_M1_:%[0-9]+]]:vr = nnan ninf nsz arcp contract afn reassoc nofpexcept PseudoVFADD_VV_M1 [[DEF]], [[COPY1]], [[COPY]], 2, 6 /* e64 */, 1 /* ta, mu */, implicit $frm + ; CHECK-NEXT: [[PseudoVFADD_VV_M1_:%[0-9]+]]:vr = nnan ninf nsz arcp contract afn reassoc nofpexcept PseudoVFADD_VV_M1 [[DEF]], [[COPY1]], [[COPY]], 7, 2, 6 /* e64 */, 1 /* ta, mu */ ; CHECK-NEXT: $v8 = COPY [[PseudoVFADD_VV_M1_]] ; CHECK-NEXT: PseudoRET implicit $v8 %1 = fadd fast <2 x double> %x, %y diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll index 5ebb05eaf86f5..5477c01e2bfa9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll @@ -617,8 +617,7 @@ declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: @@ -632,7 +631,7 @@ entry: %0, %1, %2, - iXLen %3, iXLen 3) + iXLen 7, iXLen %3, iXLen 3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll index c8ea5e7d06b5c..ee029926e73e2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll @@ -587,8 +587,7 @@ declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( , , , - iXLen, - iXLen) + iXLen, iXLen, iXLen); define 
@intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -601,7 +600,7 @@ entry: %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 7, iXLen %4, iXLen 1) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll index 87b25ddd8e57d..88fc66c4adc17 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll @@ -587,8 +587,7 @@ declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( , , , - iXLen, - iXLen) + iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -601,7 +600,7 @@ entry: %1, %2, %3, - iXLen %4, iXLen 2) + iXLen 7, iXLen %4, iXLen 2) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll index 1c1910474b7ed..d0375fafc89a9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll @@ -587,8 +587,7 @@ declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( , , , - iXLen, - iXLen) + iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry @@ -601,7 +600,7 @@ entry: %1, %2, %3, - iXLen %4, iXLen 0) + iXLen 7, iXLen %4, iXLen 0) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll index 15abf865be7e5..2322256af5f8f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll @@ -83,11 +83,11 @@ define @foo( %a, @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i32 %gvl) + %x = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i32 7, i32 %gvl) %call = call signext i32 @puts(ptr @.str) - %z = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %x, i32 %gvl) + %z = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %x, i32 7, i32 %gvl) ret %z } -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( %passthru, %a, %b, i32 %gvl) +declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( %passthru, %a, %b, i32, i32 %gvl) declare i32 @puts(ptr); diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll index 0067735b492c7..6ef94aaa67800 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll @@ -114,11 +114,11 @@ define @foo( %a, @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i64 %gvl) + %x = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %b, i64 7, i64 %gvl) %call = call signext i32 @puts(ptr @.str) - %z = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %x, i64 %gvl) + %z = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %a, %x, i64 7, i64 %gvl) ret %z } -declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( %passthru, %a, %b, i64 %gvl) +declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( %passthru, %a, %b, i64, i64 %gvl) declare i32 @puts(ptr); diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll index e75a292543247..d079230b33635 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll @@ 
-38,14 +38,14 @@ define @vpmerge_vfadd( %passthru, @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32( %passthru, %x, %y, %m, i64 %vl, i64 1) + %a = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32( %passthru, %x, %y, %m, i64 7, i64 %vl, i64 1) %splat = insertelement poison, i1 -1, i32 0 %mask = shufflevector %splat, poison, zeroinitializer %b = call @llvm.riscv.vmerge.nxv2f32.nxv2f32( %passthru, %passthru, %a, %mask, i64 %vl) ret %b } -declare @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(, , , , i64, i64) +declare @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(, , , , i64, i64, i64) define @vpmerge_vfsub( %passthru, %x, %y, %m, i64 %vl) { ; CHECK-LABEL: vpmerge_vfsub: diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll index 42ad8fdeaa29f..a2824d033bc55 100644 --- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll @@ -284,7 +284,7 @@ declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16: @@ -297,7 +297,7 @@ entry: %0, %1, %2, - iXLen %3) + iXLen 7, iXLen %3) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll index ebfa04df7c953..63b929557b7e7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll @@ -11,20 +11,22 @@ declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv1f16.nxv1f16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -34,14 +36,15 @@ declare @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16( @@ -49,7 +52,7 @@ entry: %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -58,20 +61,22 @@ declare @llvm.riscv.vfadd.nxv2f16.nxv2f16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv2f16.nxv2f16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -81,14 +86,15 @@ declare @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16( , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: 
%a = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16( @@ -96,7 +102,7 @@ entry: %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -105,20 +111,22 @@ declare @llvm.riscv.vfadd.nxv4f16.nxv4f16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv4f16.nxv4f16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -128,14 +136,15 @@ declare @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16( , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16( @@ -143,7 +152,7 @@ entry: %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -152,20 +161,22 @@ declare @llvm.riscv.vfadd.nxv8f16.nxv8f16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv8f16.nxv8f16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -175,14 +186,15 @@ declare @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16( , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16( @@ -190,7 +202,7 @@ entry: %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -199,20 +211,22 @@ declare @llvm.riscv.vfadd.nxv16f16.nxv16f16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv16f16.nxv16f16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -222,14 +236,15 @@ declare @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16( , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16( @@ -237,7 +252,7 @@ entry: %1, %2, %3, - iXLen %4, iXLen 
1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -246,20 +261,22 @@ declare @llvm.riscv.vfadd.nxv32f16.nxv32f16( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv32f16.nxv32f16( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -269,15 +286,16 @@ declare @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16( , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16( @@ -285,7 +303,7 @@ entry: %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -294,20 +312,22 @@ declare @llvm.riscv.vfadd.nxv1f32.nxv1f32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv1f32.nxv1f32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -317,14 +337,15 @@ declare @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32( , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32( @@ -332,7 +353,7 @@ entry: %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -341,20 +362,22 @@ declare @llvm.riscv.vfadd.nxv2f32.nxv2f32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv2f32.nxv2f32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -364,14 +387,15 @@ declare @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32( , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32( @@ -379,7 +403,7 @@ entry: %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -388,20 +412,22 @@ declare 
@llvm.riscv.vfadd.nxv4f32.nxv4f32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv4f32.nxv4f32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -411,14 +437,15 @@ declare @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32( , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32( @@ -426,7 +453,7 @@ entry: %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -435,20 +462,22 @@ declare @llvm.riscv.vfadd.nxv8f32.nxv8f32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv8f32.nxv8f32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -458,14 +487,15 @@ declare @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32( , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32( @@ -473,7 +503,7 @@ entry: %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -482,20 +512,22 @@ declare @llvm.riscv.vfadd.nxv16f32.nxv16f32( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv16f32.nxv16f32( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -505,15 +537,16 @@ declare @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32( , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32( @@ -521,7 +554,7 @@ entry: %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -530,20 +563,22 @@ declare @llvm.riscv.vfadd.nxv1f64.nxv1f64( , , , - iXLen); + iXLen, iXLen); 
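; Illustrative call shape (a sketch, not one of the generated tests): the new
; iXLen operand before vl is the static frm immediate; passing 7 (FRM_DYN)
; keeps the current frm, so no fsrmi/fsrm bracket is emitted:
;
;   %r = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
;          <vscale x 1 x double> undef, <vscale x 1 x double> %a,
;          <vscale x 1 x double> %b, iXLen 7, iXLen %vl)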
define @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv1f64.nxv1f64( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -553,14 +588,15 @@ declare @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64( , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64( @@ -568,7 +604,7 @@ entry: %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -577,20 +613,22 @@ declare @llvm.riscv.vfadd.nxv2f64.nxv2f64( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv2f64.nxv2f64( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -600,14 +638,15 @@ declare @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64( , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64( @@ -615,7 +654,7 @@ entry: %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -624,20 +663,22 @@ declare @llvm.riscv.vfadd.nxv4f64.nxv4f64( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv4f64.nxv4f64( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -647,14 +688,15 @@ declare @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64( , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64( @@ -662,7 +704,7 @@ entry: %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -671,20 +713,22 @@ declare @llvm.riscv.vfadd.nxv8f64.nxv8f64( , , , - iXLen); + iXLen, iXLen); define @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv8f64.nxv8f64( undef, %0, %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -694,15 +738,16 @@ declare @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64( , , , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64( @@ -710,7 +755,7 @@ entry: %1, %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -719,20 +764,22 @@ declare @llvm.riscv.vfadd.nxv1f16.f16( , , half, - iXLen); + iXLen, iXLen); define @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv1f16.f16( undef, %0, half %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -742,14 +789,15 @@ declare @llvm.riscv.vfadd.mask.nxv1f16.f16( , half, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv1f16.f16( @@ -757,7 +805,7 @@ entry: %1, half %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -766,20 +814,22 @@ declare @llvm.riscv.vfadd.nxv2f16.f16( , , half, - iXLen); + iXLen, iXLen); define @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv2f16.f16( undef, %0, half %1, - iXLen %2) + iXLen 0, iXLen %2) ret %a } @@ -789,14 +839,15 @@ declare @llvm.riscv.vfadd.mask.nxv2f16.f16( , half, , - iXLen, - iXLen); + iXLen, iXLen, iXLen); define @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv2f16.f16( @@ -804,7 +855,7 @@ entry: %1, half %2, %3, - iXLen %4, iXLen 1) + iXLen 0, iXLen %4, iXLen 1) ret %a } @@ -813,20 +864,22 @@ declare @llvm.riscv.vfadd.nxv4f16.f16( , , half, - iXLen); + iXLen, iXLen); define @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a0, e16, m1, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
     <vscale x 4 x half> undef,
     <vscale x 4 x half> %0,
     half %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -836,14 +889,15 @@ declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 4 x half> @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
@@ -851,7 +905,7 @@ entry:
     <vscale x 4 x half> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -860,20 +914,22 @@ declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   half,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x half> @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
     <vscale x 8 x half> undef,
     <vscale x 8 x half> %0,
     half %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -883,14 +939,15 @@ declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 8 x half> @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v10, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
@@ -898,7 +955,7 @@ entry:
     <vscale x 8 x half> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -907,20 +964,22 @@ declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   half,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x half> @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
     <vscale x 16 x half> undef,
     <vscale x 16 x half> %0,
     half %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -930,14 +989,15 @@ declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 16 x half> @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v12, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
@@ -945,7 +1005,7 @@ entry:
     <vscale x 16 x half> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -954,20 +1014,22 @@ declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   half,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 32 x half> @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
     <vscale x 32 x half> undef,
     <vscale x 32 x half> %0,
     half %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -977,14 +1039,15 @@ declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
   <vscale x 32 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 32 x half> @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v16, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
@@ -992,7 +1055,7 @@ entry:
     <vscale x 32 x half> %1,
     half %2,
     <vscale x 32 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -1001,20 +1064,22 @@ declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   float,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
     <vscale x 1 x float> undef,
     <vscale x 1 x float> %0,
     float %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -1024,14 +1089,15 @@ declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
@@ -1039,7 +1105,7 @@ entry:
     <vscale x 1 x float> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -1048,20 +1114,22 @@ declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   float,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
     <vscale x 2 x float> undef,
     <vscale x 2 x float> %0,
     float %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -1071,14 +1139,15 @@ declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
@@ -1086,7 +1155,7 @@ entry:
     <vscale x 2 x float> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1095,20 +1164,22 @@ declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   float,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
     <vscale x 4 x float> undef,
     <vscale x 4 x float> %0,
     float %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -1118,14 +1189,15 @@ declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v10, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
@@ -1133,7 +1205,7 @@ entry:
     <vscale x 4 x float> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -1142,20 +1214,22 @@ declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   float,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
     <vscale x 8 x float> undef,
     <vscale x 8 x float> %0,
     float %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -1165,14 +1239,15 @@ declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v12, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
@@ -1180,7 +1255,7 @@ entry:
     <vscale x 8 x float> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -1189,20 +1264,22 @@ declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   float,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x float> @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
     <vscale x 16 x float> undef,
     <vscale x 16 x float> %0,
     float %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -1212,14 +1289,15 @@ declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
   <vscale x 16 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 16 x float> @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v16, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
@@ -1227,7 +1305,7 @@ entry:
     <vscale x 16 x float> %1,
     float %2,
     <vscale x 16 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -1236,20 +1314,22 @@ declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   double,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %0,
     double %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -1259,14 +1339,15 @@ declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
@@ -1274,7 +1355,7 @@ entry:
     <vscale x 1 x double> %1,
     double %2,
     <vscale x 1 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -1283,20 +1364,22 @@ declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   double,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
     <vscale x 2 x double> undef,
     <vscale x 2 x double> %0,
     double %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -1306,14 +1389,15 @@ declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v10, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
@@ -1321,7 +1405,7 @@ entry:
     <vscale x 2 x double> %1,
     double %2,
     <vscale x 2 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -1330,20 +1414,22 @@ declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   double,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
     <vscale x 4 x double> undef,
     <vscale x 4 x double> %0,
     double %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -1353,14 +1439,15 @@ declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v12, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
@@ -1368,7 +1455,7 @@ entry:
     <vscale x 4 x double> %1,
     double %2,
     <vscale x 4 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -1377,20 +1464,22 @@ declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   double,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x double> @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
     <vscale x 8 x double> undef,
     <vscale x 8 x double> %0,
     double %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -1400,14 +1489,15 @@ declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 8 x double> @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v16, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
@@ -1415,7 +1505,7 @@ entry:
     <vscale x 8 x double> %1,
     double %2,
     <vscale x 8 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index 2668774528bca..6294407b0e2ca 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -7,8 +7,8 @@
 declare i64 @llvm.riscv.vsetvli(i64, i64, i64)
 
-declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64)
-declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64)
+declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64, i64)
+declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64, i64)
 
 declare <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64)
@@ -37,7 +37,7 @@ entry:
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:                                          ; preds = %entry
-  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 7, i64 %0)
   br label %if.end
 
 if.else:                                          ; preds = %entry
@@ -70,7 +70,7 @@ entry:
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:                                          ; preds = %entry
-  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 7, i64 %0)
   br label %if.end
 
 if.else:                                          ; preds = %entry
@@ -105,7 +105,7 @@ entry:
 
 if.then:                                          ; preds = %entry
   %0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0)
-  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 7, i64 %0)
   br label %if.end
 
 if.else:                                          ; preds = %entry
@@ -158,7 +158,7 @@ entry:
 if.then:                                          ; preds = %entry
   %0 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(<vscale x 1 x double> undef, double 1.000000e+00, i64 %avl)
   %1 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(<vscale x 1 x double> undef, double 2.000000e+00, i64 %avl)
-  %2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %avl)
+  %2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 7, i64 %avl)
   %3 = bitcast i8* @scratch to <vscale x 1 x double>*
   tail call void @llvm.riscv.vse.nxv1f64(<vscale x 1 x double> %2, <vscale x 1 x double>* %3, i64 %avl)
   br label %if.end
@@ -166,7 +166,7 @@ if.then:                                          ; preds = %entry
 if.else:                                          ; preds = %entry
   %4 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(<vscale x 2 x float> undef, float 1.000000e+00, i64 %avl)
   %5 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(<vscale x 2 x float> undef, float 2.000000e+00, i64 %avl)
-  %6 = tail call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %4, <vscale x 2 x float> %5, i64 %avl)
+  %6 = tail call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %4, <vscale x 2 x float> %5, i64 7, i64 %avl)
   %7 = bitcast i8* @scratch to <vscale x 2 x float>*
   tail call void @llvm.riscv.vse.nxv2f32(<vscale x 2 x float> %6, <vscale x 2 x float>* %7, i64 %avl)
   br label %if.end
@@ -204,7 +204,7 @@ entry:
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:                                          ; preds = %entry
-  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 7, i64 %0)
   br label %if.end
 
 if.else:                                          ; preds = %entry
@@ -282,7 +282,7 @@ entry:
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:                                          ; preds = %entry
-  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 7, i64 %0)
   br label %if.end
 
 if.else:                                          ; preds = %entry
@@ -299,7 +299,7 @@ if.then4:                                         ; preds = %if.end
   %3 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0)
   %4 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(<vscale x 1 x double> undef, double 1.000000e+00, i64 %3)
   %5 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(<vscale x 1 x double> undef, double 2.000000e+00, i64 %3)
-  %6 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %4, <vscale x 1 x double> %5, i64 %3)
+  %6 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %4, <vscale x 1 x double> %5, i64 7, i64 %3)
   %7 = bitcast i8* @scratch to <vscale x 1 x double>*
   tail call void @llvm.riscv.vse.nxv1f64(<vscale x 1 x double> %6, <vscale x 1 x double>* %7, i64 %3)
   br label %if.end10
@@ -308,7 +308,7 @@ if.else5:                                         ; preds = %if.end
   %8 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 0)
   %9 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(<vscale x 2 x float> undef, float 1.000000e+00, i64 %8)
   %10 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(<vscale x 2 x float> undef, float 2.000000e+00, i64 %8)
-  %11 = tail call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %9, <vscale x 2 x float> %10, i64 %8)
+  %11 = tail call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %9, <vscale x 2 x float> %10, i64 7, i64 %8)
   %12 = bitcast i8* @scratch to <vscale x 2 x float>*
   tail call void @llvm.riscv.vse.nxv2f32(<vscale x 2 x float> %11, <vscale x 2 x float>* %12, i64 %8)
   br label %if.end10
@@ -366,7 +366,7 @@ entry:
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:                                          ; preds = %entry
-  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 7, i64 %0)
   br label %if.end
 
 if.else:                                          ; preds = %entry
@@ -426,7 +426,7 @@ entry:
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:                                          ; preds = %entry
-  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 7, i64 %0)
   call void @foo()
   br label %if.end
@@ -616,7 +616,7 @@ for.body:                                         ; preds = %entry, %for.body
   %arrayidx1 = getelementptr inbounds double, double* %b, i64 %i.014
   %3 = bitcast double* %arrayidx1 to <vscale x 1 x double>*
   %4 = tail call <vscale x 1 x double> @llvm.riscv.vle.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double>* %3, i64 %0)
-  %5 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> %2, <vscale x 1 x double> %4, i64 %0)
+  %5 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> %2, <vscale x 1 x double> %4, i64 7, i64 %0)
   %arrayidx2 = getelementptr inbounds double, double* %c, i64 %i.014
   %6 = bitcast double* %arrayidx2 to <vscale x 1 x double>*
   tail call void @llvm.riscv.vse.nxv1f64.i64(<vscale x 1 x double> %5, <vscale x 1 x double>* %6, i64 %0)
@@ -981,7 +981,7 @@ exit:
 declare i64 @llvm.riscv.vsetvlimax.i64(i64, i64)
 declare <vscale x 1 x double> @llvm.riscv.vle.nxv1f64.i64(<vscale x 1 x double>, <vscale x 1 x double>* nocapture, i64)
-declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64)
+declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64, i64)
 declare void @llvm.riscv.vse.nxv1f64.i64(<vscale x 1 x double>, <vscale x 1 x double>* nocapture, i64)
 declare <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index 9c2582299a290..f4bde8438289e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -8,7 +8,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i64)
+  i64, i64)
 
 declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
@@ -27,7 +27,7 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %a,
     <vscale x 1 x double> %b,
-    i64 %0)
+    i64 7, i64 %0)
 
   ret <vscale x 1 x double> %1
 }
@@ -43,7 +43,7 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %a,
     <vscale x 1 x double> %b,
-    i64 %avl)
+    i64 7, i64 %avl)
 
   ret <vscale x 1 x double> %1
 }
@@ -232,6 +232,7 @@ entry:
     <vscale x 1 x double> %a,
     <vscale x 1 x double> %a,
     <vscale x 1 x i1> %mask,
+    i64 7,
     i64 9,
     i64 0)
 
   %y = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
@@ -250,7 +251,7 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %a,
     <vscale x 1 x double> %b,
-    i64 -1)
+    i64 7, i64 -1)
 
   ret <vscale x 1 x double> %0
 }
@@ -269,12 +270,12 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %a,
     <vscale x 1 x double> %b,
-    i64 1)
+    i64 7, i64 1)
   %f2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %f1,
     <vscale x 1 x double> %b,
-    i64 %vsetvli)
+    i64 7, i64 %vsetvli)
 
   ret <vscale x 1 x double> %f2
 }
@@ -291,12 +292,12 @@ entry:
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %a,
    <vscale x 1 x double> %b,
-    i64 %avl)
+    i64 7, i64 %avl)
   %f2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %f1,
     <vscale x 1 x double> %b,
-    i64 %vsetvli)
+    i64 7, i64 %vsetvli)
 
   ret <vscale x 1 x double> %f2
 }
@@ -321,7 +322,7 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %splat,
     <vscale x 1 x double> %b,
-    i64 %vsetvli)
+    i64 7, i64 %vsetvli)
 
   ret <vscale x 1 x double> %f2
 }
@@ -342,7 +343,7 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %a,
     <vscale x 1 x double> %b,
-    i64 %vsetvli)
+    i64 7, i64 %vsetvli)
   %c2 = extractelement <vscale x 1 x double> %f2, i32 0
   %c3 = fadd double %c1, %c2
   ret double %c3
@@ -370,7 +371,7 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %a,
     <vscale x 1 x double> %a,
-    i64 %x)
+    i64 7, i64 %x)
   %y2 = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double> %f2, double %b, i64 1)
   %res = fadd <vscale x 1 x double> %y, %y2
@@ -574,7 +575,7 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %c,
-    i64 %0)
+    i64 7, i64 %0)
 
   ret <vscale x 1 x double> %2
 }
@@ -615,6 +616,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
   i64,
+  i64,
   i64);
 
 declare <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(
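
Usage sketch (an editorial addition, not part of the patch): the extra iXLen/i64 operand inserted ahead of vl in every llvm.riscv.vfadd call above encodes the rounding mode. Values 0-4 select a static mode (the vfadd.ll tests pass 0 and expect codegen to bracket the instruction with fsrmi a0, 0 / fsrm a0), while 7 selects the dynamic mode already held in the frm CSR, which is why the vsetvli-insert tests pass i64 7 to keep their pre-patch behaviour. A hypothetical pair of IR functions illustrating both forms (function names are illustrative only):

declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
  <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>,
  i64, i64)

; Static mode: rounding-mode operand 1 (round toward zero). Codegen is
; expected to emit fsrmi a0, 1 (saving the old frm in a0 and installing
; RTZ), the vfadd.vv, then fsrm a0 to restore the previous mode.
define <vscale x 1 x double> @vfadd_rtz(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %vl) {
  %r = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef, <vscale x 1 x double> %a,
    <vscale x 1 x double> %b, i64 1, i64 %vl)
  ret <vscale x 1 x double> %r
}

; Dynamic mode: rounding-mode operand 7. No fsrmi/fsrm pair is needed;
; whatever the frm CSR currently holds applies, matching the semantics
; the intrinsic had before it grew the extra operand.
define <vscale x 1 x double> @vfadd_dyn(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %vl) {
  %r = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef, <vscale x 1 x double> %a,
    <vscale x 1 x double> %b, i64 7, i64 %vl)
  ret <vscale x 1 x double> %r
}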