61 changes: 53 additions & 8 deletions clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -90,6 +90,21 @@ enum class TypeModifier : uint8_t {
LLVM_MARK_AS_BITMASK_ENUM(LMUL1),
};

// The lowest two bits encode the vta/vma policy operand value.
enum Policy : uint8_t {
TU = 0,   // For unmasked TU, the low two bits match TUMU.
TA = 1,   // For unmasked TA, the low two bits match TAMU.
TUMA = 2,
TAMA = 3,
TUMU = 4,
TAMU = 5,
MU = 6,   // For masked MU, canonicalized to TAMU.
MA = 7,   // For masked MA, canonicalized to TAMA.
TUM = 10, // For masked TUM, canonicalized to TUMA.
TAM = 11, // For masked TAM, canonicalized to TAMA.
PolicyNone,
};
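// Sanity-check sketch: for the canonical masked values, the low two bits are
// exactly the vta/vma policy operand passed to the IR intrinsics, matching the
// trailing i64 0/1/2/3 in the CHECK lines of the tests updated below.
static_assert((Policy::TUMU & 3) == 0, "tumu -> policy operand 0");
static_assert((Policy::TAMU & 3) == 1, "tamu -> policy operand 1");
static_assert((Policy::TUMA & 3) == 2, "tuma -> policy operand 2");
static_assert((Policy::TAMA & 3) == 3, "tama -> policy operand 3");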

// PrototypeDescriptor is used to compute type info of arguments or return
// value.
struct PrototypeDescriptor {
@@ -290,9 +305,10 @@ class RVVIntrinsic {
std::string OverloadedName;
std::string IRName;
bool IsMasked;
bool HasMaskedOffOperand;
bool HasVL;
PolicyScheme Scheme;
bool HasUnMaskedOverloaded;
bool SupportOverloading;
bool HasBuiltinAlias;
std::string ManualCodegen;
RVVTypePtr OutputType; // Builtin output type
@@ -301,29 +317,35 @@
// InputTypes. -1 means the return type.
std::vector<int64_t> IntrinsicTypes;
unsigned NF = 1;
Policy DefaultPolicy = Policy::PolicyNone;

public:
RVVIntrinsic(llvm::StringRef Name, llvm::StringRef Suffix,
llvm::StringRef OverloadedName, llvm::StringRef OverloadedSuffix,
llvm::StringRef IRName, bool IsMasked, bool HasMaskedOffOperand,
bool HasVL, PolicyScheme Scheme, bool HasUnMaskedOverloaded,
bool HasVL, PolicyScheme Scheme, bool SupportOverloading,
bool HasBuiltinAlias, llvm::StringRef ManualCodegen,
const RVVTypes &Types,
const std::vector<int64_t> &IntrinsicTypes,
const std::vector<llvm::StringRef> &RequiredFeatures,
unsigned NF);
unsigned NF, Policy DefaultPolicy, bool IsPrototypeDefaultTU);
~RVVIntrinsic() = default;

RVVTypePtr getOutputType() const { return OutputType; }
const RVVTypes &getInputTypes() const { return InputTypes; }
llvm::StringRef getBuiltinName() const { return BuiltinName; }
llvm::StringRef getName() const { return Name; }
llvm::StringRef getOverloadedName() const { return OverloadedName; }
bool hasMaskedOffOperand() const { return HasMaskedOffOperand; }
bool hasVL() const { return HasVL; }
bool hasPolicy() const { return Scheme != SchemeNone; }
bool hasPassthruOperand() const { return Scheme == HasPassthruOperand; }
bool hasPolicyOperand() const { return Scheme == HasPolicyOperand; }
bool hasUnMaskedOverloaded() const { return HasUnMaskedOverloaded; }
bool hasPolicy() const { return Scheme != PolicyScheme::SchemeNone; }
bool hasPassthruOperand() const {
return Scheme == PolicyScheme::HasPassthruOperand;
}
bool hasPolicyOperand() const {
return Scheme == PolicyScheme::HasPolicyOperand;
}
bool supportOverloading() const { return SupportOverloading; }
bool hasBuiltinAlias() const { return HasBuiltinAlias; }
bool hasManualCodegen() const { return !ManualCodegen.empty(); }
bool isMasked() const { return IsMasked; }
@@ -334,6 +356,14 @@ class RVVIntrinsic {
const std::vector<int64_t> &getIntrinsicTypes() const {
return IntrinsicTypes;
}
Policy getDefaultPolicy() const {
assert(DefaultPolicy != Policy::PolicyNone);
return DefaultPolicy;
}
unsigned getDefaultPolicyBits() const {
assert(DefaultPolicy != Policy::PolicyNone);
return static_cast<unsigned>(DefaultPolicy) & 3;
}

// Return the type string for a BUILTIN() macro in Builtins.def.
std::string getBuiltinTypeStr() const;
@@ -345,7 +375,17 @@
static llvm::SmallVector<PrototypeDescriptor>
computeBuiltinTypes(llvm::ArrayRef<PrototypeDescriptor> Prototype,
bool IsMasked, bool HasMaskedOffOperand, bool HasVL,
unsigned NF);
unsigned NF, bool IsPrototypeDefaultTU,
PolicyScheme DefaultScheme,
Policy DefaultPolicy = Policy::PolicyNone);
static llvm::SmallVector<Policy>
getSupportedMaskedPolicies(bool HasTailPolicy, bool HasMaskPolicy);

static void updateNamesAndPolicy(bool IsMasked, bool HasPolicy,
bool IsPrototypeDefaultTU, std::string &Name,
std::string &BuiltinName,
std::string &OverloadedName,
Policy &DefaultPolicy);
};

// RVVRequire should be sync'ed with target features, but only
@@ -401,6 +441,11 @@ struct RVVIntrinsicRecord {
bool HasMasked : 1;
bool HasVL : 1;
bool HasMaskedOffOperand : 1;
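// Policy metadata: whether the prototype is tail-undisturbed by default,
// which policy suffixes exist, and the PolicyScheme (2 bits) of the unmasked
// and masked forms; SemaRISCVVectorLookup uses these to decide which
// _tu/_ta/_tum/... variants to create.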
bool IsPrototypeDefaultTU : 1;
bool HasTailPolicy : 1;
bool HasMaskPolicy : 1;
uint8_t UnMaskedPolicyScheme : 2;
uint8_t MaskedPolicyScheme : 2;
};

llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
3 changes: 3 additions & 0 deletions clang/lib/CodeGen/CGBuiltin.cpp
@@ -19105,6 +19105,9 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
Intrinsic::ID ID = Intrinsic::not_intrinsic;
unsigned NF = 1;
constexpr unsigned TAIL_UNDISTURBED = 0;
constexpr unsigned TAIL_AGNOSTIC = 1;
constexpr unsigned TAIL_AGNOSTIC_MASK_AGNOSTIC = 3;
int DefaultPolicy = TAIL_UNDISTURBED;
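// Bit 0 of the policy operand is the tail policy (1 = tail agnostic) and bit 1
// the mask policy (1 = mask agnostic), so the masked CHECK lines in the
// updated tests end with i64 0 (tumu), 1 (tamu), 2 (tuma) or 3 (tama).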

// Required for overloaded intrinsics.
llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
97 changes: 73 additions & 24 deletions clang/lib/Sema/SemaRISCVVectorLookup.cpp
@@ -146,7 +146,8 @@ class RISCVIntrinsicManagerImpl : public sema::RISCVIntrinsicManager {
// Create RVVIntrinsicDef.
void InitRVVIntrinsic(const RVVIntrinsicRecord &Record, StringRef SuffixStr,
StringRef OverloadedSuffixStr, bool IsMask,
RVVTypes &Types);
RVVTypes &Types, bool HasPolicy, Policy DefaultPolicy,
bool IsPrototypeDefaultTU);

// Create FunctionDecl for a vector intrinsic.
void CreateRVVIntrinsicDecl(LookupResult &LR, IdentifierInfo *II,
@@ -185,15 +186,31 @@ void RISCVIntrinsicManagerImpl::InitIntrinsicList() {
ArrayRef<PrototypeDescriptor> OverloadedSuffixProto = ProtoSeq2ArrayRef(
Record.OverloadedSuffixIndex, Record.OverloadedSuffixSize);

PolicyScheme UnMaskedPolicyScheme =
static_cast<PolicyScheme>(Record.UnMaskedPolicyScheme);
PolicyScheme MaskedPolicyScheme =
static_cast<PolicyScheme>(Record.MaskedPolicyScheme);

llvm::SmallVector<PrototypeDescriptor> ProtoSeq =
RVVIntrinsic::computeBuiltinTypes(BasicProtoSeq, /*IsMasked=*/false,
/*HasMaskedOffOperand=*/false,
Record.HasVL, Record.NF);
RVVIntrinsic::computeBuiltinTypes(
BasicProtoSeq, /*IsMasked=*/false,
/*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
Record.IsPrototypeDefaultTU, UnMaskedPolicyScheme);

llvm::SmallVector<PrototypeDescriptor> ProtoMaskSeq =
RVVIntrinsic::computeBuiltinTypes(BasicProtoSeq, /*IsMasked=*/true,
Record.HasMaskedOffOperand,
Record.HasVL, Record.NF);
RVVIntrinsic::computeBuiltinTypes(
BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
Record.HasVL, Record.NF, Record.IsPrototypeDefaultTU,
MaskedPolicyScheme);

bool UnMaskedHasPolicy = UnMaskedPolicyScheme != PolicyScheme::SchemeNone;
bool MaskedHasPolicy = MaskedPolicyScheme != PolicyScheme::SchemeNone;
// If an unmasked builtin supports policies, they can only be TU or TA.
llvm::SmallVector<Policy> SupportedUnMaskedPolicies = {Policy::TU,
Policy::TA};
llvm::SmallVector<Policy> SupportedMaskedPolicies =
RVVIntrinsic::getSupportedMaskedPolicies(Record.HasTailPolicy,
Record.HasMaskPolicy);

for (unsigned int TypeRangeMaskShift = 0;
TypeRangeMaskShift <= static_cast<unsigned int>(BasicType::MaxOffset);
@@ -242,33 +259,63 @@ void RISCVIntrinsicManagerImpl::InitIntrinsicList() {
BaseType, Log2LMUL, OverloadedSuffixProto);

// Create non-masked intrinsic.
InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr, false, *Types);

if (Record.HasMasked) {
// Create masked intrinsic.
Optional<RVVTypes> MaskTypes = RVVType::computeTypes(
BaseType, Log2LMUL, Record.NF, ProtoMaskSeq);

InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr, true,
*MaskTypes);
InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr, false, *Types,
UnMaskedHasPolicy, Policy::PolicyNone,
Record.IsPrototypeDefaultTU);

// Create non-masked policy intrinsic.
if (Record.UnMaskedPolicyScheme != PolicyScheme::SchemeNone) {
for (auto P : SupportedUnMaskedPolicies) {
llvm::SmallVector<PrototypeDescriptor> PolicyPrototype =
RVVIntrinsic::computeBuiltinTypes(
BasicProtoSeq, /*IsMasked=*/false,
/*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
Record.IsPrototypeDefaultTU, UnMaskedPolicyScheme, P);
Optional<RVVTypes> PolicyTypes = RVVType::computeTypes(
BaseType, Log2LMUL, Record.NF, PolicyPrototype);
InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
/*IsMask=*/false, *PolicyTypes, UnMaskedHasPolicy,
P, Record.IsPrototypeDefaultTU);
}
}
}
}
if (!Record.HasMasked)
continue;
// Create masked intrinsic.
Optional<RVVTypes> MaskTypes =
RVVType::computeTypes(BaseType, Log2LMUL, Record.NF, ProtoMaskSeq);
InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr, true,
*MaskTypes, MaskedHasPolicy, Policy::PolicyNone,
Record.IsPrototypeDefaultTU);
if (Record.MaskedPolicyScheme == PolicyScheme::SchemeNone)
continue;
// Create masked policy intrinsic.
for (auto P : SupportedMaskedPolicies) {
llvm::SmallVector<PrototypeDescriptor> PolicyPrototype =
RVVIntrinsic::computeBuiltinTypes(
BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
Record.HasVL, Record.NF, Record.IsPrototypeDefaultTU,
MaskedPolicyScheme, P);
Optional<RVVTypes> PolicyTypes = RVVType::computeTypes(
BaseType, Log2LMUL, Record.NF, PolicyPrototype);
InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
/*IsMask=*/true, *PolicyTypes, MaskedHasPolicy, P,
Record.IsPrototypeDefaultTU);
}
} // End for different LMUL
} // End for different TypeRange
}
}

// Compute the name and signature for an intrinsic with concrete types.
void RISCVIntrinsicManagerImpl::InitRVVIntrinsic(
const RVVIntrinsicRecord &Record, StringRef SuffixStr,
StringRef OverloadedSuffixStr, bool IsMask, RVVTypes &Signature) {
StringRef OverloadedSuffixStr, bool IsMasked, RVVTypes &Signature,
bool HasPolicy, Policy DefaultPolicy, bool IsPrototypeDefaultTU) {
// Function name, e.g. vadd_vv_i32m1.
std::string Name = Record.Name;
if (!SuffixStr.empty())
Name += "_" + SuffixStr.str();

if (IsMask)
Name += "_m";

// Overloaded function name, e.g. vadd.
std::string OverloadedName;
if (!Record.OverloadedName)
@@ -280,8 +327,10 @@ void RISCVIntrinsicManagerImpl::InitRVVIntrinsic(

// clang built-in function name, e.g. __builtin_rvv_vadd.
std::string BuiltinName = "__builtin_rvv_" + std::string(Record.Name);
if (IsMask)
BuiltinName += "_m";

RVVIntrinsic::updateNamesAndPolicy(IsMasked, HasPolicy, IsPrototypeDefaultTU,
Name, BuiltinName, OverloadedName,
DefaultPolicy);

// Put into IntrinsicList.
size_t Index = IntrinsicList.size();
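Taken together, for an intrinsic such as vadd that has both tail and mask policies, the lookup above registers the plain and masked forms plus _tu/_ta unmasked and _tuma/_tumu/_tama/_tamu masked variants. A minimal usage sketch, mirroring the signatures exercised by the tests added in this patch (it assumes a V-enabled target and <riscv_vector.h>; the function name demo is only illustrative):

#include <riscv_vector.h>

vint8mf8_t demo(vbool64_t mask, vint8mf8_t merge, vint8mf8_t a, vint8mf8_t b,
                size_t vl) {
  vint8mf8_t x = vadd(a, b, vl);           // unmasked, default (tail agnostic)
  x = vadd_tu(merge, x, b, vl);            // unmasked, tail undisturbed
  x = vadd_tuma(mask, merge, x, b, vl);    // masked, tail undisturbed / mask agnostic
  return vadd_tamu(mask, merge, x, b, vl); // masked, tail agnostic / mask undisturbed
}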
153 changes: 134 additions & 19 deletions clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -849,13 +849,15 @@ RVVIntrinsic::RVVIntrinsic(
StringRef NewName, StringRef Suffix, StringRef NewOverloadedName,
StringRef OverloadedSuffix, StringRef IRName, bool IsMasked,
bool HasMaskedOffOperand, bool HasVL, PolicyScheme Scheme,
bool HasUnMaskedOverloaded, bool HasBuiltinAlias, StringRef ManualCodegen,
bool SupportOverloading, bool HasBuiltinAlias, StringRef ManualCodegen,
const RVVTypes &OutInTypes, const std::vector<int64_t> &NewIntrinsicTypes,
const std::vector<StringRef> &RequiredFeatures, unsigned NF)
: IRName(IRName), IsMasked(IsMasked), HasVL(HasVL), Scheme(Scheme),
HasUnMaskedOverloaded(HasUnMaskedOverloaded),
HasBuiltinAlias(HasBuiltinAlias), ManualCodegen(ManualCodegen.str()),
NF(NF) {
const std::vector<StringRef> &RequiredFeatures, unsigned NF,
Policy NewDefaultPolicy, bool IsPrototypeDefaultTU)
: IRName(IRName), IsMasked(IsMasked),
HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL), Scheme(Scheme),
SupportOverloading(SupportOverloading), HasBuiltinAlias(HasBuiltinAlias),
ManualCodegen(ManualCodegen.str()), NF(NF),
DefaultPolicy(NewDefaultPolicy) {

// Init BuiltinName, Name and OverloadedName
BuiltinName = NewName.str();
@@ -868,10 +870,9 @@ RVVIntrinsic::RVVIntrinsic(
Name += "_" + Suffix.str();
if (!OverloadedSuffix.empty())
OverloadedName += "_" + OverloadedSuffix.str();
if (IsMasked) {
BuiltinName += "_m";
Name += "_m";
}

updateNamesAndPolicy(IsMasked, hasPolicy(), IsPrototypeDefaultTU, Name,
BuiltinName, OverloadedName, DefaultPolicy);

// Init OutputType and InputTypes
OutputType = OutInTypes[0];
@@ -880,8 +881,8 @@ RVVIntrinsic::RVVIntrinsic(
// IntrinsicTypes are the type indices of the unmasked TA variant. They need
// to be updated if there is a merge operand (it is always the first operand).
IntrinsicTypes = NewIntrinsicTypes;
if ((IsMasked && HasMaskedOffOperand) ||
(!IsMasked && hasPassthruOperand())) {
if ((IsMasked && hasMaskedOffOperand()) ||
(!IsMasked && hasPassthruOperand() && !IsPrototypeDefaultTU)) {
for (auto &I : IntrinsicTypes) {
if (I >= 0)
I += NF;
@@ -909,18 +910,40 @@ std::string RVVIntrinsic::getSuffixStr(
return join(SuffixStrs, "_");
}

llvm::SmallVector<PrototypeDescriptor>
RVVIntrinsic::computeBuiltinTypes(llvm::ArrayRef<PrototypeDescriptor> Prototype,
bool IsMasked, bool HasMaskedOffOperand,
bool HasVL, unsigned NF) {
llvm::SmallVector<PrototypeDescriptor> RVVIntrinsic::computeBuiltinTypes(
llvm::ArrayRef<PrototypeDescriptor> Prototype, bool IsMasked,
bool HasMaskedOffOperand, bool HasVL, unsigned NF,
bool IsPrototypeDefaultTU, PolicyScheme DefaultScheme,
Policy DefaultPolicy) {
SmallVector<PrototypeDescriptor> NewPrototype(Prototype.begin(),
Prototype.end());
// Update DefaultPolicy if needed (to TA or TAMA) before computing the builtin types.
switch (DefaultPolicy) {
case Policy::MA:
DefaultPolicy = Policy::TAMA;
break;
case Policy::TAM:
DefaultPolicy = Policy::TAMA;
break;
case Policy::PolicyNone:
// A masked intrinsic with no explicit policy is not treated as TAMA.
if (!IsMasked) {
if (IsPrototypeDefaultTU)
DefaultPolicy = Policy::TU;
else
DefaultPolicy = Policy::TA;
}
break;
default:
break;
}
if (IsMasked) {
// If HasMaskedOffOperand, insert result type as first input operand.
// If HasMaskedOffOperand, insert the result type as the first input operand
// when needed.
if (HasMaskedOffOperand) {
if (NF == 1) {
if (NF == 1 && DefaultPolicy != Policy::TAMA) {
NewPrototype.insert(NewPrototype.begin() + 1, NewPrototype[0]);
} else {
} else if (NF > 1) {
// Convert
// (void, op0 address, op1 address, ...)
// to
@@ -943,6 +966,13 @@ RVVIntrinsic::computeBuiltinTypes(llvm::ArrayRef<PrototypeDescriptor> Prototype,
// If IsMasked, insert PrototypeDescriptor:Mask as first input operand.
NewPrototype.insert(NewPrototype.begin() + 1, PrototypeDescriptor::Mask);
}
} else if (NF == 1) {
bool HasPassthruOp = DefaultScheme == PolicyScheme::HasPassthruOperand;
if (DefaultPolicy == Policy::TU && HasPassthruOp && !IsPrototypeDefaultTU)
NewPrototype.insert(NewPrototype.begin(), NewPrototype[0]);
else if (DefaultPolicy == Policy::TA && HasPassthruOp &&
IsPrototypeDefaultTU)
NewPrototype.erase(NewPrototype.begin() + 1);
}

// If HasVL, append PrototypeDescriptor:VL to last operand
@@ -951,6 +981,86 @@ RVVIntrinsic::computeBuiltinTypes(llvm::ArrayRef<PrototypeDescriptor> Prototype,
return NewPrototype;
}
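// At the C level (signatures lifted from the vadd tests added in this patch;
// the test_* names are only illustrative), the prototype juggling above means:
// relative to the plain unmasked form, the _tu variant gains a leading merge
// operand, while the masked _tama variant keeps the mask but drops the merge.
//
//   vint8mf8_t test_plain(vint8mf8_t op1, vint8mf8_t op2, size_t vl);
//   vint8mf8_t test_tu(vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, size_t vl);
//   vint8mf8_t test_tama(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl);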

llvm::SmallVector<Policy>
RVVIntrinsic::getSupportedMaskedPolicies(bool HasTailPolicy,
bool HasMaskPolicy) {
if (HasTailPolicy && HasMaskPolicy)
return {Policy::TUMA, Policy::TAMA, Policy::TUMU, Policy::TAMU};
else if (HasTailPolicy)
return {Policy::TUM, Policy::TAM};
return {Policy::MA, Policy::MU};
}

void RVVIntrinsic::updateNamesAndPolicy(bool IsMasked, bool HasPolicy,
bool IsPrototypeDefaultTU,
std::string &Name,
std::string &BuiltinName,
std::string &OverloadedName,
Policy &DefaultPolicy) {

auto appendPolicySuffix = [&](const std::string &suffix) {
Name += suffix;
BuiltinName += suffix;
OverloadedName += suffix;
};

switch (DefaultPolicy) {
case Policy::TU:
appendPolicySuffix("_tu");
break;
case Policy::TA:
appendPolicySuffix("_ta");
break;
case Policy::MU:
appendPolicySuffix("_mu");
DefaultPolicy = Policy::TAMU;
break;
case Policy::MA:
appendPolicySuffix("_ma");
DefaultPolicy = Policy::TAMA;
break;
case Policy::TUM:
appendPolicySuffix("_tum");
DefaultPolicy = Policy::TUMA;
break;
case Policy::TAM:
appendPolicySuffix("_tam");
DefaultPolicy = Policy::TAMA;
break;
case Policy::TUMU:
appendPolicySuffix("_tumu");
break;
case Policy::TAMU:
appendPolicySuffix("_tamu");
break;
case Policy::TUMA:
appendPolicySuffix("_tuma");
break;
case Policy::TAMA:
appendPolicySuffix("_tama");
break;
default:
if (IsMasked) {
Name += "_m";
// FIXME: The current _m default policy implementation differs from the
// RVV intrinsic spec (which specifies TUMA).
DefaultPolicy = Policy::TUMU;
if (HasPolicy)
BuiltinName += "_tumu";
else
BuiltinName += "_m";
} else if (IsPrototypeDefaultTU) {
DefaultPolicy = Policy::TU;
if (HasPolicy)
BuiltinName += "_tu";
} else {
DefaultPolicy = Policy::TA;
if (HasPolicy)
BuiltinName += "_ta";
}
}
}

SmallVector<PrototypeDescriptor> parsePrototypes(StringRef Prototypes) {
SmallVector<PrototypeDescriptor> PrototypeDescriptors;
const StringRef Primaries("evwqom0ztul");
@@ -993,6 +1103,11 @@ raw_ostream &operator<<(raw_ostream &OS, const RVVIntrinsicRecord &Record) {
OS << (int)Record.HasMasked << ",";
OS << (int)Record.HasVL << ",";
OS << (int)Record.HasMaskedOffOperand << ",";
OS << (int)Record.IsPrototypeDefaultTU << ",";
OS << (int)Record.HasTailPolicy << ",";
OS << (int)Record.HasMaskPolicy << ",";
OS << (int)Record.UnMaskedPolicyScheme << ",";
OS << (int)Record.MaskedPolicyScheme << ",";
OS << "},\n";
return OS;
}
2 changes: 1 addition & 1 deletion clang/test/CodeGen/RISCV/riscv-attr-builtin-alias.c
@@ -10,7 +10,7 @@
static inline __attribute__((__always_inline__, __nodebug__))

__rvv_generic
__attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv)))
__attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_ta)))
vint8m1_t vadd_generic (vint8m1_t op0, vint8m1_t op1, size_t op2);
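// With this patch the unmasked vadd builtin that supports policies is named
// with a trailing _ta (tail agnostic) suffix, so the alias above must refer to
// __builtin_rvv_vadd_vv_ta rather than __builtin_rvv_vadd_vv.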

// CHECK-LABEL: @test(
216 changes: 216 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
@@ -1702,3 +1702,219 @@ vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
vuint64m8_t op1, uint64_t op2, size_t vl) {
return vaaddu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vaadd_tu(merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vaadd_tu(merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vaaddu_tu(merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vaaddu_tu(merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vaadd_ta(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
return vaadd_ta(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vaaddu_ta(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vaaddu_ta(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vaadd_tuma(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vaadd_tuma(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vaaddu_tuma(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vaaddu_tuma(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vaadd_tumu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vaadd_tumu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vaaddu_tumu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vaaddu_tumu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vaadd_tama(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vaadd_tama(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vaaddu_tama(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vaaddu_tama(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vaadd_tamu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vaadd_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vaadd_tamu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vaaddu_tamu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaaddu_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vaaddu_tamu(mask, merge, op1, op2, vl);
}
72 changes: 72 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c
@@ -883,3 +883,75 @@ vuint64m8_t test_vadc_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t carryin,
size_t vl) {
return vadc(op1, op2, carryin, vl);
}

// CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadc_vvm_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl) {
return vadc_tu(merge, op1, op2, carryin, vl);
}

// CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadc_vxm_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl) {
return vadc_tu(merge, op1, op2, carryin, vl);
}

// CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadc_vvm_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl) {
return vadc_tu(merge, op1, op2, carryin, vl);
}

// CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadc_vxm_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl) {
return vadc_tu(merge, op1, op2, carryin, vl);
}

// CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadc_vvm_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl) {
return vadc_ta(op1, op2, carryin, vl);
}

// CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadc_vxm_i32mf2_ta(vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl) {
return vadc_ta(op1, op2, carryin, vl);
}

// CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadc_vvm_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl) {
return vadc_ta(op1, op2, carryin, vl);
}

// CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadc_vxm_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl) {
return vadc_ta(op1, op2, carryin, vl);
}
216 changes: 216 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
@@ -1588,3 +1588,219 @@ vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8
vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
return vadd(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vv_i8mf8_tu(vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
return vadd_tu(merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vx_i8mf8_tu(vint8mf8_t merge, vint8mf8_t op1, int8_t op2, size_t vl) {
return vadd_tu(merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vv_u8mf8_tu(vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
return vadd_tu(merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vx_u8mf8_tu(vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, size_t vl) {
return vadd_tu(merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vv_i8mf8_ta(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
return vadd_ta(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vx_i8mf8_ta(vint8mf8_t op1, int8_t op2, size_t vl) {
return vadd_ta(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vv_u8mf8_ta(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
return vadd_ta(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vx_u8mf8_ta(vuint8mf8_t op1, uint8_t op2, size_t vl) {
return vadd_ta(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vv_i8mf8_tuma(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
return vadd_tuma(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vx_i8mf8_tuma(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, int8_t op2, size_t vl) {
return vadd_tuma(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vv_u8mf8_tuma(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
return vadd_tuma(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vx_u8mf8_tuma(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, size_t vl) {
return vadd_tuma(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
return vadd_tumu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, int8_t op2, size_t vl) {
return vadd_tumu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
return vadd_tumu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, size_t vl) {
return vadd_tumu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vv_i8mf8_tama(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
return vadd_tama(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vx_i8mf8_tama(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
return vadd_tama(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vv_u8mf8_tama(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
return vadd_tama(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vx_u8mf8_tama(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
return vadd_tama(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vv_i8mf8_tamu(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
return vadd_tamu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vx_i8mf8_tamu(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, int8_t op2, size_t vl) {
return vadd_tamu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vv_u8mf8_tamu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
return vadd_tamu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vx_u8mf8_tamu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, size_t vl) {
return vadd_tamu(mask, merge, op1, op2, vl);
}
216 changes: 216 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
@@ -1587,3 +1587,219 @@ vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8
vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
return vand(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vand_tu(merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vand_tu(merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vand_tu(merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vand_tu(merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vand_ta(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
return vand_ta(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vand_ta(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vand_ta(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vand_tuma(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vand_tuma(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vand_tuma(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vand_tuma(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vand_tumu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vand_tumu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vand_tumu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vand_tumu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vand_tama(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vand_tama(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vand_tama(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vand_tama(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vand_tamu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vand_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vand_tamu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vand_tamu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vand_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vand_tamu(mask, merge, op1, op2, vl);
}
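The masked policy variants above pass their tail/mask policy as the trailing i64 immediate of the *.mask.* intrinsic call, using the low two bits of the policy encoding (vta in bit 0, vma in bit 1): tumu = 0, tamu = 1, tuma = 2, tama = 3, which is exactly what the CHECK lines verify. A minimal sketch of that mapping, assuming a hypothetical helper name (rvv_policy_imm is illustrative only and not part of these generated tests):

static inline long rvv_policy_imm(int tail_agnostic, int mask_agnostic) {
  // Low two policy bits as passed to the masked IR intrinsics:
  // tumu -> 0, tamu -> 1, tuma -> 2, tama -> 3.
  return (long)((mask_agnostic << 1) | tail_agnostic);
}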
216 changes: 216 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c
@@ -1702,3 +1702,219 @@ vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
vuint64m8_t op1, uint64_t op2, size_t vl) {
return vasubu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vasub_tu(merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vasub_tu(merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vasubu_tu(merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vasubu_tu(merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vasub_ta(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
return vasub_ta(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vasubu_ta(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vasubu_ta(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vasub_tuma(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vasub_tuma(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vasubu_tuma(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vasubu_tuma(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vasub_tumu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vasub_tumu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vasubu_tumu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vasubu_tumu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vasub_tama(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vasub_tama(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vasubu_tama(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vasubu_tama(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vasub_tamu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vasub_tamu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vasubu_tamu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vasubu_tamu(mask, merge, op1, op2, vl);
}
216 changes: 216 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
@@ -1587,3 +1587,219 @@ vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m
vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
return vdivu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vdiv_tu(merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vdiv_tu(merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vdivu_tu(merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vdivu_tu(merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vdiv_ta(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
return vdiv_ta(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vdivu_ta(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vdivu_ta(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vdiv_tuma(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vdiv_tuma(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vdivu_tuma(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vdivu_tuma(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vdiv_tumu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vdiv_tumu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vdivu_tumu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vdivu_tumu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vdiv_tama(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vdiv_tama(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vdivu_tama(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vdivu_tama(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vdiv_tamu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vdiv_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vdiv_tamu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vdivu_tamu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vdivu_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vdivu_tamu(mask, merge, op1, op2, vl);
}