diff --git a/llvm/include/llvm/TargetParser/RISCVTargetParser.h b/llvm/include/llvm/TargetParser/RISCVTargetParser.h index cdd19189f8dc7..d80ad7b6e9a8c 100644 --- a/llvm/include/llvm/TargetParser/RISCVTargetParser.h +++ b/llvm/include/llvm/TargetParser/RISCVTargetParser.h @@ -51,6 +51,13 @@ enum VLMUL : uint8_t { LMUL_F2 }; +enum VSEW : uint8_t { + SEW_8 = 0, + SEW_16, + SEW_32, + SEW_64, +}; + enum { TAIL_UNDISTURBED_MASK_UNDISTURBED = 0, TAIL_AGNOSTIC = 1, diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h index 08f056f78979a..0f2075cb29a04 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h @@ -18,10 +18,13 @@ #include "llvm/ADT/APInt.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/StringSwitch.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineOperand.h" #include "llvm/MC/MCInstrDesc.h" #include "llvm/TargetParser/RISCVISAInfo.h" #include "llvm/TargetParser/RISCVTargetParser.h" #include "llvm/TargetParser/SubtargetFeature.h" +#include namespace llvm { @@ -123,6 +126,12 @@ enum { // 3 -> widening case TargetOverlapConstraintTypeShift = UsesVXRMShift + 1, TargetOverlapConstraintTypeMask = 3ULL << TargetOverlapConstraintTypeShift, + + HasImplictSEWShift = TargetOverlapConstraintTypeShift + 2, + HasImplictSEWMask = 1 << HasImplictSEWShift, + + VSEWShift = HasImplictSEWShift + 1, + VSEWMask = 0b11 << VSEWShift, }; // Helper functions to read TSFlags. @@ -171,14 +180,29 @@ static inline bool hasRoundModeOp(uint64_t TSFlags) { /// \returns true if this instruction uses vxrm static inline bool usesVXRM(uint64_t TSFlags) { return TSFlags & UsesVXRMMask; } +/// \returns true if this instruction has an implicit SEW value. +static inline bool hasImplicitSEW(uint64_t TSFlags) { + return TSFlags & HasImplictSEWMask; +} + +/// \returns the VSEW for the instruction. +static inline VSEW getVSEW(uint64_t TSFlags) { + return static_cast<VSEW>((TSFlags & VSEWMask) >> VSEWShift); +} + +/// \returns true if there is a SEW value for the instruction. +static inline bool hasSEW(uint64_t TSFlags) { + return hasSEWOp(TSFlags) || hasImplicitSEW(TSFlags); +} + static inline unsigned getVLOpNum(const MCInstrDesc &Desc) { const uint64_t TSFlags = Desc.TSFlags; - // This method is only called if we expect to have a VL operand, and all - // instructions with VL also have SEW. - assert(hasSEWOp(TSFlags) && hasVLOp(TSFlags)); - unsigned Offset = 2; + // This method is only called if we expect to have a VL operand. + assert(hasVLOp(TSFlags)); + // Some instructions don't have a SEW operand.
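+  // Trailing operands are laid out as ..., VL[, SEW][, policy], so count back past the optional policy and SEW operands to reach VL.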
+ unsigned Offset = 1 + hasSEWOp(TSFlags); if (hasVecPolicyOp(TSFlags)) - Offset = 3; + Offset = Offset + 1; return Desc.getNumOperands() - Offset; } @@ -191,6 +215,28 @@ static inline unsigned getSEWOpNum(const MCInstrDesc &Desc) { return Desc.getNumOperands() - Offset; } +static inline unsigned getLog2SEW(uint64_t TSFlags) { + return 3 + RISCVII::getVSEW(TSFlags); +} + +static inline MachineOperand getSEWOp(const MachineInstr &MI) { + uint64_t TSFlags = MI.getDesc().TSFlags; + assert(hasSEW(TSFlags) && "The instruction doesn't have SEW value!"); + if (hasSEWOp(TSFlags)) + return MI.getOperand(getSEWOpNum(MI.getDesc())); + + return MachineOperand::CreateImm(getLog2SEW(TSFlags)); +} + +static inline unsigned getLog2SEW(const MachineInstr &MI) { + uint64_t TSFlags = MI.getDesc().TSFlags; + assert(RISCVII::hasSEW(TSFlags) && "The instruction doesn't have SEW value!"); + if (RISCVII::hasSEWOp(TSFlags)) + return MI.getOperand(RISCVII::getSEWOpNum(MI.getDesc())).getImm(); + + return getLog2SEW(TSFlags); +} + static inline unsigned getVecPolicyOpNum(const MCInstrDesc &Desc) { assert(hasVecPolicyOp(Desc.TSFlags)); return Desc.getNumOperands() - 1; diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp index b0568297a470a..d5db2717a721d 100644 --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -317,8 +317,11 @@ void RISCVDAGToDAGISel::addVectorLoadStoreOperands( Operands.push_back(VL); MVT XLenVT = Subtarget->getXLenVT(); - SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT); - Operands.push_back(SEWOp); + // Add SEW operand if it is indexed or mask load/store instruction. + if (Log2SEW == 0 || IndexVT) { + SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT); + Operands.push_back(SEWOp); + } // At the IR layer, all the masked load intrinsics have policy operands, // none of the others do. All have passthru operands. For our pseudos, @@ -2226,7 +2229,6 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { selectVLOp(Node->getOperand(2), VL); unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits()); - SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT); // If VL=1, then we don't need to do a strided load and can just do a // regular load. @@ -2243,7 +2245,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { Operands.push_back(CurDAG->getRegister(RISCV::X0, XLenVT)); uint64_t Policy = RISCVII::MASK_AGNOSTIC | RISCVII::TAIL_AGNOSTIC; SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT); - Operands.append({VL, SEW, PolicyOp, Ld->getChain()}); + Operands.append({VL, PolicyOp, Ld->getChain()}); RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT); const RISCV::VLEPseudo *P = RISCV::getVLEPseudo( @@ -2970,7 +2972,7 @@ static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo, const MCInstrDesc &MCID = TII->get(User->getMachineOpcode()); const uint64_t TSFlags = MCID.TSFlags; - if (!RISCVII::hasSEWOp(TSFlags)) + if (!RISCVII::hasSEW(TSFlags)) return false; assert(RISCVII::hasVLOp(TSFlags)); @@ -2980,7 +2982,9 @@ static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo, bool HasVecPolicyOp = RISCVII::hasVecPolicyOp(TSFlags); unsigned VLIdx = User->getNumOperands() - HasVecPolicyOp - HasChainOp - HasGlueOp - 2; - const unsigned Log2SEW = User->getConstantOperandVal(VLIdx + 1); + const unsigned Log2SEW = RISCVII::hasSEWOp(TSFlags) + ? 
User->getConstantOperandVal(VLIdx + 1) + : RISCVII::getLog2SEW(TSFlags); if (UserOpNo == VLIdx) return false; @@ -3696,12 +3700,18 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) { return false; } + SDLoc DL(N); + // The vector policy operand may be present for masked intrinsics bool HasVecPolicyOp = RISCVII::hasVecPolicyOp(TrueTSFlags); - unsigned TrueVLIndex = - True.getNumOperands() - HasVecPolicyOp - HasChainOp - HasGlueOp - 2; + bool HasSEWOp = RISCVII::hasSEWOp(TrueTSFlags); + unsigned TrueVLIndex = True.getNumOperands() - HasVecPolicyOp - HasChainOp - + HasGlueOp - 1 - HasSEWOp; SDValue TrueVL = True.getOperand(TrueVLIndex); - SDValue SEW = True.getOperand(TrueVLIndex + 1); + SDValue SEW = + HasSEWOp ? True.getOperand(TrueVLIndex + 1) + : CurDAG->getTargetConstant(RISCVII::getLog2SEW(TrueTSFlags), DL, + Subtarget->getXLenVT()); auto GetMinVL = [](SDValue LHS, SDValue RHS) { if (LHS == RHS) @@ -3732,8 +3742,6 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) { !True->getFlags().hasNoFPExcept()) return false; - SDLoc DL(N); - // From the preconditions we checked above, we know the mask and thus glue // for the result node will be taken from True. if (IsMasked) { @@ -3799,7 +3807,10 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) { if (HasRoundingMode) Ops.push_back(True->getOperand(TrueVLIndex - 1)); - Ops.append({VL, SEW, PolicyOp}); + Ops.push_back(VL); + if (RISCVII::hasSEWOp(TrueTSFlags)) + Ops.push_back(SEW); + Ops.push_back(PolicyOp); // Result node should have chain operand of True. if (HasChainOp) diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 3ab9e7d69105c..c317f63aadd62 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -17857,7 +17857,6 @@ static MachineBasicBlock *emitVFROUND_NOEXCEPT_MASK(MachineInstr &MI, .add(MI.getOperand(3)) .add(MachineOperand::CreateImm(7)) // frm = DYN .add(MI.getOperand(4)) - .add(MI.getOperand(5)) .add(MI.getOperand(6)) .add(MachineOperand::CreateReg(RISCV::FRM, /*IsDef*/ false, diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp index b5fd508fa77de..ffb4bdd1cd392 100644 --- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp @@ -55,10 +55,6 @@ static unsigned getVLOpNum(const MachineInstr &MI) { return RISCVII::getVLOpNum(MI.getDesc()); } -static unsigned getSEWOpNum(const MachineInstr &MI) { - return RISCVII::getSEWOpNum(MI.getDesc()); -} - static bool isVectorConfigInstr(const MachineInstr &MI) { return MI.getOpcode() == RISCV::PseudoVSETVLI || MI.getOpcode() == RISCV::PseudoVSETVLIX0 || @@ -166,9 +162,9 @@ static bool isNonZeroLoadImmediate(const MachineInstr &MI) { /// Return true if this is an operation on mask registers. Note that /// this includes both arithmetic/logical ops and load/store (vlm/vsm). static bool isMaskRegOp(const MachineInstr &MI) { - if (!RISCVII::hasSEWOp(MI.getDesc().TSFlags)) + if (!RISCVII::hasSEW(MI.getDesc().TSFlags)) return false; - const unsigned Log2SEW = MI.getOperand(getSEWOpNum(MI)).getImm(); + const unsigned Log2SEW = RISCVII::getLog2SEW(MI); // A Log2SEW of 0 is an operation on mask registers only. 
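+  // RISCVII::getLog2SEW covers both forms: it reads the explicit SEW operand when present and falls back to the implicit VSEW field in TSFlags otherwise.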
return Log2SEW == 0; } @@ -383,7 +379,7 @@ DemandedFields getDemanded(const MachineInstr &MI, Res.demandVTYPE(); // Start conservative on the unlowered form too uint64_t TSFlags = MI.getDesc().TSFlags; - if (RISCVII::hasSEWOp(TSFlags)) { + if (RISCVII::hasSEW(TSFlags)) { Res.demandVTYPE(); if (RISCVII::hasVLOp(TSFlags)) Res.demandVL(); @@ -405,7 +401,7 @@ DemandedFields getDemanded(const MachineInstr &MI, } // Store instructions don't use the policy fields. - if (RISCVII::hasSEWOp(TSFlags) && MI.getNumExplicitDefs() == 0) { + if (RISCVII::hasSEW(TSFlags) && MI.getNumExplicitDefs() == 0) { Res.TailPolicy = false; Res.MaskPolicy = false; } @@ -940,7 +936,7 @@ static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags, RISCVII::VLMUL VLMul = RISCVII::getLMul(TSFlags); - unsigned Log2SEW = MI.getOperand(getSEWOpNum(MI)).getImm(); + unsigned Log2SEW = RISCVII::getLog2SEW(MI); // A Log2SEW of 0 is an operation on mask registers only. unsigned SEW = Log2SEW ? 1 << Log2SEW : 8; assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW"); @@ -1176,7 +1172,7 @@ static VSETVLIInfo adjustIncoming(VSETVLIInfo PrevInfo, VSETVLIInfo NewInfo, void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info, const MachineInstr &MI) const { uint64_t TSFlags = MI.getDesc().TSFlags; - if (!RISCVII::hasSEWOp(TSFlags)) + if (!RISCVII::hasSEW(TSFlags)) return; const VSETVLIInfo NewInfo = computeInfoForInstr(MI, TSFlags, *ST, MRI); @@ -1256,7 +1252,7 @@ bool RISCVInsertVSETVLI::computeVLVTYPEChanges(const MachineBasicBlock &MBB, for (const MachineInstr &MI : MBB) { transferBefore(Info, MI); - if (isVectorConfigInstr(MI) || RISCVII::hasSEWOp(MI.getDesc().TSFlags)) + if (isVectorConfigInstr(MI) || RISCVII::hasSEW(MI.getDesc().TSFlags)) HadVectorOp = true; transferAfter(Info, MI); @@ -1385,7 +1381,7 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) { } uint64_t TSFlags = MI.getDesc().TSFlags; - if (RISCVII::hasSEWOp(TSFlags)) { + if (RISCVII::hasSEW(TSFlags)) { if (PrevInfo != CurInfo) { // If this is the first implicit state change, and the state change // requested can be proven to produce the same register contents, we diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td index a5c8524d05cbc..01e514609eaf3 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td +++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td @@ -223,6 +223,13 @@ class RVInstCommon widening case bits<2> TargetOverlapConstraintType = 0; let TSFlags{22-21} = TargetOverlapConstraintType; + + bit HasImplictSEW = 0; + let TSFlags{23} = HasImplictSEW; + + // The actual SEW value is 8 * (2 ^ VSEW). 
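+  // e.g. VSEW = 2 encodes SEW = 8 * (2 ^ 2) = 32, i.e. Log2SEW = 5.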
+ bits<2> VSEW = 0; + let TSFlags{25-24} = VSEW; } class RVInstgetDesc(); - MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL - MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW - MIB.addImm(0); // tu, mu + MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL + MIB.add(RISCVII::getSEWOp(*DefMBBI)); // SEW + MIB.addImm(0); // tu, mu MIB.addReg(RISCV::VL, RegState::Implicit); MIB.addReg(RISCV::VTYPE, RegState::Implicit); } @@ -1706,8 +1706,7 @@ bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &Root, return false; // SEW - if (RISCVII::hasSEWOp(TSFlags) && - !checkImmOperand(RISCVII::getSEWOpNum(Desc))) + if (RISCVII::hasSEW(TSFlags) && !checkImmOperand(RISCVII::getSEWOpNum(Desc))) return false; // Mask @@ -2463,10 +2462,6 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI, return false; } } - if (!RISCVII::hasSEWOp(TSFlags)) { - ErrInfo = "VL operand w/o SEW operand?"; - return false; - } } if (RISCVII::hasSEWOp(TSFlags)) { unsigned OpIdx = RISCVII::getSEWOpNum(Desc); @@ -3521,8 +3516,8 @@ MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI, case CASE_FP_WIDEOP_OPCODE_LMULS_MF4(FWADD_WV): case CASE_FP_WIDEOP_OPCODE_LMULS_MF4(FWSUB_WV): { assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) && - MI.getNumExplicitOperands() == 7 && - "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy"); + MI.getNumExplicitOperands() == 6 && + "Expect 6 explicit operands rd, rs2, rs1, rm, vl, policy"); // If the tail policy is undisturbed we can't convert. if ((MI.getOperand(RISCVII::getVecPolicyOpNum(MI.getDesc())).getImm() & 1) == 0) @@ -3545,8 +3540,7 @@ MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI, .add(MI.getOperand(2)) .add(MI.getOperand(3)) .add(MI.getOperand(4)) - .add(MI.getOperand(5)) - .add(MI.getOperand(6)); + .add(MI.getOperand(5)); break; } case CASE_WIDEOP_OPCODE_LMULS(WADD_WV): diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td index fc60a9cc7cd30..21086688f13c7 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -769,15 +769,20 @@ class GetVTypeScalarPredicates { class VPseudoUSLoadNoMask : Pseudo<(outs RetClass:$rd), - (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew, - ixlenimm:$policy), []>, + !if(!eq(EEW, 1), + (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo, RISCVVLE { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + defvar hasSEWOp = !eq(EEW, 1); + let HasSEWOp = hasSEWOp; + // For mask load, EEW = 1. 
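+  // Non-mask loads drop the explicit SEW operand and record log2(EEW / 8) in VSEW instead.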
+ let HasImplictSEW = !not(hasSEWOp); + let VSEW = !if(hasSEWOp, 0, !logtwo(!div(EEW, 8))); let HasVecPolicyOp = 1; let Constraints = "$rd = $dest"; } @@ -787,7 +792,7 @@ class VPseudoUSLoadMask.R:$rd), (ins GetVRegNoV0.R:$merge, GPRMem:$rs1, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + VMaskOp:$vm, AVL:$vl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLE { let mayLoad = 1; @@ -795,7 +800,8 @@ class VPseudoUSLoadMask : Pseudo<(outs RetClass:$rd, GPR:$vl), - (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl, - ixlenimm:$sew, ixlenimm:$policy), []>, + (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLE { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); let HasVecPolicyOp = 1; let Constraints = "$rd = $dest"; } @@ -821,7 +827,7 @@ class VPseudoUSLoadFFMask.R:$rd, GPR:$vl), (ins GetVRegNoV0.R:$merge, GPRMem:$rs1, - VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy), []>, + VMaskOp:$vm, AVL:$avl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLE { let mayLoad = 1; @@ -829,7 +835,8 @@ class VPseudoUSLoadFFMask : Pseudo<(outs RetClass:$rd), - (ins RetClass:$dest, GPRMem:$rs1, GPR:$rs2, AVL:$vl, - ixlenimm:$sew, ixlenimm:$policy), []>, + (ins RetClass:$dest, GPRMem:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLE { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); let HasVecPolicyOp = 1; let Constraints = "$rd = $dest"; } @@ -855,7 +862,7 @@ class VPseudoSLoadMask.R:$rd), (ins GetVRegNoV0.R:$merge, GPRMem:$rs1, GPR:$rs2, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + VMaskOp:$vm, AVL:$vl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLE { let mayLoad = 1; @@ -863,7 +870,8 @@ class VPseudoSLoadMask : Pseudo<(outs), - (ins StClass:$rd, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew), []>, + !if(!eq(EEW, 1), + (ins StClass:$rd, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew), + (ins StClass:$rd, GPRMem:$rs1, AVL:$vl)), []>, RISCVVPseudo, RISCVVSE { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + // For mask store, EEW = 1. 
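+  // As with unit-stride loads, only the EEW = 1 (mask) store keeps an explicit SEW operand.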
+ defvar hasSEWOp = !eq(EEW, 1); + let HasSEWOp = hasSEWOp; + let HasImplictSEW = !not(hasSEWOp); + let VSEW = !if(hasSEWOp, 0, !logtwo(!div(EEW, 8))); } class VPseudoUSStoreMask : Pseudo<(outs), - (ins StClass:$rd, GPRMem:$rs1, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>, + (ins StClass:$rd, GPRMem:$rs1, VMaskOp:$vm, AVL:$vl), []>, RISCVVPseudo, RISCVVSE { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); } class VPseudoSStoreNoMask : Pseudo<(outs), - (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2, - AVL:$vl, ixlenimm:$sew), []>, + (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2, AVL:$vl), []>, RISCVVPseudo, RISCVVSE { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); } class VPseudoSStoreMask : Pseudo<(outs), - (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>, + (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2, VMaskOp:$vm, AVL:$vl), []>, RISCVVPseudo, RISCVVSE { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); } class VPseudoNullaryNoMask : @@ -1018,10 +1032,14 @@ class VPseudoNullaryPseudoM : class VPseudoUnaryNoMask : + int TargetConstraintType = 1, + bit hasSEWOp = 1> : Pseudo<(outs RetClass:$rd), - (ins RetClass:$merge, OpClass:$rs2, - AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins RetClass:$merge, OpClass:$rs2, + AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins RetClass:$merge, OpClass:$rs2, + AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; @@ -1029,17 +1047,24 @@ class VPseudoUnaryNoMask : + int TargetConstraintType = 1, + bit hasSEWOp = 1> : Pseudo<(outs RetClass:$rd), - (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$rm, - AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$rm, + AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$rm, + AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; @@ -1047,19 +1072,26 @@ class VPseudoUnaryNoMaskRoundingMode : + int TargetConstraintType = 1, + bit hasSEWOp = 1> : Pseudo<(outs GetVRegNoV0.R:$rd), - (ins GetVRegNoV0.R:$merge, OpClass:$rs2, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins GetVRegNoV0.R:$merge, OpClass:$rs2, + VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins GetVRegNoV0.R:$merge, OpClass:$rs2, + VMaskOp:$vm, AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; @@ -1067,19 +1099,27 @@ class VPseudoUnaryMask : + int TargetConstraintType = 1, + bit hasSEWOp = 1> : Pseudo<(outs GetVRegNoV0.R:$rd), - (ins GetVRegNoV0.R:$merge, OpClass:$rs2, - VMaskOp:$vm, ixlenimm:$rm, - AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins GetVRegNoV0.R:$merge, OpClass:$rs2, + VMaskOp:$vm, ixlenimm:$rm, + AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins GetVRegNoV0.R:$merge, OpClass:$rs2, + VMaskOp:$vm, ixlenimm:$rm, + AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; @@ -1087,11 +1127,14 @@ class VPseudoUnaryMaskRoundingMode : + int TargetConstraintType = 1, + bit hasSEWOp = 1> : Pseudo<(outs RetClass:$rd), - (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$frm, - AVL:$vl, ixlenimm:$sew, 
ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$frm, + AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins RetClass:$merge, OpClass:$rs2, ixlenimm:$frm, + AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; @@ -1125,19 +1172,27 @@ class VPseudoUnaryNoMask_FRM : + int TargetConstraintType = 1, + bit hasSEWOp = 1> : Pseudo<(outs GetVRegNoV0.R:$rd), - (ins GetVRegNoV0.R:$merge, OpClass:$rs2, - VMaskOp:$vm, ixlenimm:$frm, - AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins GetVRegNoV0.R:$merge, OpClass:$rs2, + VMaskOp:$vm, ixlenimm:$frm, + AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins GetVRegNoV0.R:$merge, OpClass:$rs2, + VMaskOp:$vm, ixlenimm:$frm, + AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; @@ -1145,10 +1200,13 @@ class VPseudoUnaryMask_FRM : Pseudo<(outs RetClass:$rd), (ins RetClass:$merge, Op1Class:$rs2, - VR:$vm, AVL:$vl, ixlenimm:$sew), []>, + VR:$vm, AVL:$vl), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; let Constraints = "@earlyclobber $rd, $rd = $merge"; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + defvar sewDividedBy8 = !div(SEW, 8); + let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0); } class VPseudoBinaryNoMask : + int TargetConstraintType = 1, + bit hasSEWOp = 1> : Pseudo<(outs RetClass:$rd), - (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, - ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, + ixlenimm:$sew, ixlenimm:$policy), + (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, + ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; @@ -1220,8 +1284,11 @@ class VPseudoBinaryNoMaskTU : + int TargetConstraintType = 1, + bit hasSEWOp = 1> : Pseudo<(outs RetClass:$rd), - (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm, - AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm, + AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm, + AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; let Constraints = !interleave([Constraint, "$rd = $merge"], ","); let TargetOverlapConstraintType = TargetConstraintType; let HasVLOp = 1; - let HasSEWOp = 1; + let HasSEWOp = hasSEWOp; let HasVecPolicyOp = 1; let HasRoundModeOp = 1; let UsesVXRM = UsesVXRM_; + let HasImplictSEW = !not(hasSEWOp); + defvar sewDividedBy8 = !div(SEW, 8); + let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0); } class VPseudoBinaryMaskPolicyRoundingMode : + int TargetConstraintType = 1, + bit hasSEWOp = 1> : Pseudo<(outs GetVRegNoV0.R:$rd), - (ins GetVRegNoV0.R:$merge, - Op1Class:$rs2, Op2Class:$rs1, - VMaskOp:$vm, ixlenimm:$rm, AVL:$vl, - ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins GetVRegNoV0.R:$merge, + Op1Class:$rs2, Op2Class:$rs1, + VMaskOp:$vm, ixlenimm:$rm, AVL:$vl, + ixlenimm:$sew, ixlenimm:$policy), + (ins GetVRegNoV0.R:$merge, + Op1Class:$rs2, Op2Class:$rs1, + VMaskOp:$vm, ixlenimm:$rm, AVL:$vl, + ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; let Constraints = !interleave([Constraint, "$rd = $merge"], ","); let TargetOverlapConstraintType = TargetConstraintType; let HasVLOp = 1; - let HasSEWOp = 1; + let HasSEWOp = hasSEWOp; let HasVecPolicyOp = 1; let UsesMaskPolicy 
= 1; let HasRoundModeOp = 1; let UsesVXRM = UsesVXRM_; + let HasImplictSEW = !not(hasSEWOp); + defvar sewDividedBy8 = !div(SEW, 8); + let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0); } // Special version of VPseudoBinaryNoMask where we pretend the first source is @@ -1295,12 +1378,17 @@ class VPseudoTiedBinaryNoMask : + int TargetConstraintType = 1, + bit hasSEWOp = 1> : Pseudo<(outs RetClass:$rd), - (ins RetClass:$rs2, Op2Class:$rs1, - ixlenimm:$rm, - AVL:$vl, ixlenimm:$sew, - ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins RetClass:$rs2, Op2Class:$rs1, + ixlenimm:$rm, + AVL:$vl, ixlenimm:$sew, + ixlenimm:$policy), + (ins RetClass:$rs2, Op2Class:$rs1, + ixlenimm:$rm, + AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; @@ -1308,12 +1396,15 @@ class VPseudoTiedBinaryNoMaskRoundingMode LMUL, @@ -1365,11 +1456,16 @@ class VPseudoBinaryMaskPolicy : + int TargetConstraintType = 1, + bit hasSEWOp = 1> : Pseudo<(outs GetVRegNoV0.R:$rd), - (ins GetVRegNoV0.R:$merge, - Op1Class:$rs2, Op2Class:$rs1, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins GetVRegNoV0.R:$merge, + Op1Class:$rs2, Op2Class:$rs1, + VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins GetVRegNoV0.R:$merge, + Op1Class:$rs2, Op2Class:$rs1, + VMaskOp:$vm, AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; @@ -1377,49 +1473,70 @@ class VPseudoBinaryMaskPolicy : + string Constraint, + bit hasSEWOp = 1> : Pseudo<(outs GetVRegNoV0.R:$rd), - (ins GetVRegNoV0.R:$merge, - Op1Class:$rs2, Op2Class:$rs1, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins GetVRegNoV0.R:$merge, + Op1Class:$rs2, Op2Class:$rs1, + VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins GetVRegNoV0.R:$merge, + Op1Class:$rs2, Op2Class:$rs1, + VMaskOp:$vm, AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; let Constraints = !interleave([Constraint, "$rd = $merge"], ","); let HasVLOp = 1; - let HasSEWOp = 1; + let HasSEWOp = hasSEWOp; let HasVecPolicyOp = 1; + let HasImplictSEW = !not(hasSEWOp); + defvar sewDividedBy8 = !div(SEW, 8); + let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0); } class VPseudoTernaryMaskPolicyRoundingMode : + string Constraint, + bit hasSEWOp = 1> : Pseudo<(outs GetVRegNoV0.R:$rd), - (ins GetVRegNoV0.R:$merge, - Op1Class:$rs2, Op2Class:$rs1, - VMaskOp:$vm, - ixlenimm:$rm, - AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins GetVRegNoV0.R:$merge, + Op1Class:$rs2, Op2Class:$rs1, + VMaskOp:$vm, + ixlenimm:$rm, + AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins GetVRegNoV0.R:$merge, + Op1Class:$rs2, Op2Class:$rs1, + VMaskOp:$vm, + ixlenimm:$rm, + AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; let Constraints = !interleave([Constraint, "$rd = $merge"], ","); let HasVLOp = 1; - let HasSEWOp = 1; + let HasSEWOp = hasSEWOp; let HasVecPolicyOp = 1; let HasRoundModeOp = 1; let UsesVXRM = 0; + let HasImplictSEW = !not(hasSEWOp); + defvar sewDividedBy8 = !div(SEW, 8); + let VSEW = !if(!gt(sewDividedBy8, 0), !logtwo(sewDividedBy8), 0); } // Like VPseudoBinaryNoMask, but output can be V0. 
@@ -1488,13 +1605,20 @@ class VPseudoTiedBinaryMask : + int TargetConstraintType = 1, + bit hasSEWOp = 1> : Pseudo<(outs GetVRegNoV0.R:$rd), - (ins GetVRegNoV0.R:$merge, - Op2Class:$rs1, - VMaskOp:$vm, - ixlenimm:$rm, - AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins GetVRegNoV0.R:$merge, + Op2Class:$rs1, + VMaskOp:$vm, + ixlenimm:$rm, + AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins GetVRegNoV0.R:$merge, + Op2Class:$rs1, + VMaskOp:$vm, + ixlenimm:$rm, + AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; @@ -1502,12 +1626,15 @@ class VPseudoTiedBinaryMaskRoundingMode : + int TargetConstraintType = 1, + bit hasSEWOp = 1> : Pseudo<(outs RetClass:$rd), - (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2, - AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2, + AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2, + AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; @@ -1591,17 +1722,24 @@ class VPseudoTernaryNoMaskWithPolicy : + int TargetConstraintType = 1, + bit hasSEWOp = 1> : Pseudo<(outs RetClass:$rd), - (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2, - ixlenimm:$rm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + !if(hasSEWOp, + (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2, + ixlenimm:$rm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), + (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2, + ixlenimm:$rm, AVL:$vl, ixlenimm:$policy)), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; @@ -1610,24 +1748,27 @@ class VPseudoTernaryNoMaskWithPolicyRoundingMode NF> : Pseudo<(outs RetClass:$rd), - (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, - ixlenimm:$sew, ixlenimm:$policy), []>, + (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLSEG { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); let HasVecPolicyOp = 1; let Constraints = "$rd = $dest"; } @@ -1637,7 +1778,7 @@ class VPseudoUSSegLoadMask NF> : Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, GPRMem:$rs1, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + VMaskOp:$vm, AVL:$vl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLSEG { let mayLoad = 1; @@ -1645,7 +1786,8 @@ class VPseudoUSSegLoadMask NF> : Pseudo<(outs RetClass:$rd, GPR:$vl), - (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl, - ixlenimm:$sew, ixlenimm:$policy), []>, + (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLSEG { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); let HasVecPolicyOp = 1; let Constraints = "$rd = $dest"; } @@ -1672,7 +1814,7 @@ class VPseudoUSSegLoadFFMask NF> : Pseudo<(outs GetVRegNoV0.R:$rd, GPR:$vl), (ins GetVRegNoV0.R:$merge, GPRMem:$rs1, - VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy), []>, + VMaskOp:$vm, AVL:$avl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLSEG { let mayLoad = 1; @@ -1680,7 +1822,8 @@ class VPseudoUSSegLoadFFMask NF> : Pseudo<(outs RetClass:$rd), - (ins RetClass:$merge, GPRMem:$rs1, GPR:$offset, AVL:$vl, - ixlenimm:$sew, ixlenimm:$policy), []>, + (ins RetClass:$merge, GPRMem:$rs1, GPR:$offset, AVL:$vl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLSEG { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; let HasVLOp = 
1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); let HasVecPolicyOp = 1; let Constraints = "$rd = $merge"; } @@ -1707,8 +1850,7 @@ class VPseudoSSegLoadMask NF> : Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, GPRMem:$rs1, - GPR:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, - ixlenimm:$policy), []>, + GPR:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$policy), []>, RISCVVPseudo, RISCVVLSEG { let mayLoad = 1; @@ -1716,7 +1858,8 @@ class VPseudoSSegLoadMask NF> : Pseudo<(outs), - (ins ValClass:$rd, GPRMem:$rs1, AVL:$vl, ixlenimm:$sew), []>, + (ins ValClass:$rd, GPRMem:$rs1, AVL:$vl), []>, RISCVVPseudo, RISCVVSSEG { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); } class VPseudoUSSegStoreMask NF> : Pseudo<(outs), - (ins ValClass:$rd, GPRMem:$rs1, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>, + (ins ValClass:$rd, GPRMem:$rs1, VMaskOp:$vm, AVL:$vl), []>, RISCVVPseudo, RISCVVSSEG { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); } class VPseudoSSegStoreNoMask NF> : Pseudo<(outs), - (ins ValClass:$rd, GPRMem:$rs1, GPR:$offset, - AVL:$vl, ixlenimm:$sew), []>, + (ins ValClass:$rd, GPRMem:$rs1, GPR:$offset, AVL:$vl), []>, RISCVVPseudo, RISCVVSSEG { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); } class VPseudoSSegStoreMask NF> : Pseudo<(outs), - (ins ValClass:$rd, GPRMem:$rs1, GPR: $offset, - VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>, + (ins ValClass:$rd, GPRMem:$rs1, GPR: $offset, VMaskOp:$vm, AVL:$vl), []>, RISCVVPseudo, RISCVVSSEG { let mayLoad = 0; let mayStore = 1; let hasSideEffects = 0; let HasVLOp = 1; - let HasSEWOp = 1; + let HasImplictSEW = 1; + let VSEW = !logtwo(!div(EEW, 8)); } class VPseudoISegStoreNoMask { let VLMul = MInfo.value, SEW=sew, isCommutable = Commutable in { defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX); + defvar hasSEWOp = !eq(sew, 0); def suffix : VPseudoBinaryNoMaskTU; + Constraint, TargetConstraintType, + hasSEWOp=hasSEWOp>; def suffix # "_MASK" : VPseudoBinaryMaskPolicy, + Constraint, TargetConstraintType, + hasSEWOp=hasSEWOp>, RISCVMaskedPseudo; } } @@ -2150,15 +2297,18 @@ multiclass VPseudoBinaryRoundingMode { let VLMul = MInfo.value, SEW=sew, isCommutable = Commutable in { defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX); + defvar hasSEWOp = !eq(sew, 0); def suffix : VPseudoBinaryNoMaskRoundingMode; + TargetConstraintType, + hasSEWOp>; def suffix # "_MASK" : VPseudoBinaryMaskPolicyRoundingMode, + TargetConstraintType, + hasSEWOp>, RISCVMaskedPseudo; } } @@ -2190,10 +2340,11 @@ multiclass VPseudoBinaryEmul { let VLMul = lmul.value, SEW=sew in { defvar suffix = !if(sew, "_" # lmul.MX # "_E" # sew, "_" # lmul.MX); + defvar hasSEWOp = !eq(sew, 0); def suffix # "_" # emul.MX : VPseudoBinaryNoMaskTU; + Constraint, hasSEWOp=hasSEWOp>; def suffix # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskPolicy, + Constraint, hasSEWOp=hasSEWOp>, RISCVMaskedPseudo; } } @@ -2218,17 +2369,21 @@ multiclass VPseudoTiedBinaryRoundingMode { - defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX); - let VLMul = MInfo.value in { - def suffix # "_TIED": - VPseudoTiedBinaryNoMaskRoundingMode; - def suffix # "_MASK_TIED" : - VPseudoTiedBinaryMaskRoundingMode, - 
RISCVMaskedPseudo; + defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX); + defvar hasSEWOp = !eq(sew, 0); + let VLMul = MInfo.value, SEW = sew in { + def suffix # "_TIED" : VPseudoTiedBinaryNoMaskRoundingMode; + def suffix # "_MASK_TIED" : VPseudoTiedBinaryMaskRoundingMode, + RISCVMaskedPseudo; } } - multiclass VPseudoBinaryV_VV { defm _VV : VPseudoBinary; } @@ -2568,11 +2723,11 @@ multiclass VPseudoVSQR_V_RM { foreach e = sews in { defvar suffix = "_" # mx # "_E" # e; let SEW = e in { - def "_V" # suffix : VPseudoUnaryNoMaskRoundingMode, + def "_V" # suffix : VPseudoUnaryNoMaskRoundingMode, SchedUnary<"WriteVFSqrtV", "ReadVFSqrtV", mx, e, forceMergeOpRead=true>; def "_V" #suffix # "_MASK" - : VPseudoUnaryMaskRoundingMode, + : VPseudoUnaryMaskRoundingMode, RISCVMaskedPseudo, SchedUnary<"WriteVFSqrtV", "ReadVFSqrtV", mx, e, forceMergeOpRead=true>; @@ -2585,12 +2740,12 @@ multiclass VPseudoVRCP_V { foreach m = MxListF in { defvar mx = m.MX; foreach e = SchedSEWSet.val in { - let VLMul = m.value in { + let VLMul = m.value, SEW = e in { def "_V_" # mx # "_E" # e - : VPseudoUnaryNoMask, + : VPseudoUnaryNoMask, SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forceMergeOpRead=true>; def "_V_" # mx # "_E" # e # "_MASK" - : VPseudoUnaryMask, + : VPseudoUnaryMask, RISCVMaskedPseudo, SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forceMergeOpRead=true>; } @@ -2602,12 +2757,12 @@ multiclass VPseudoVRCP_V_RM { foreach m = MxListF in { defvar mx = m.MX; foreach e = SchedSEWSet.val in { - let VLMul = m.value in { + let VLMul = m.value, SEW = e in { def "_V_" # mx # "_E" # e - : VPseudoUnaryNoMaskRoundingMode, + : VPseudoUnaryNoMaskRoundingMode, SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forceMergeOpRead=true>; def "_V_" # mx # "_E" # e # "_MASK" - : VPseudoUnaryMaskRoundingMode, + : VPseudoUnaryMaskRoundingMode, RISCVMaskedPseudo, SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forceMergeOpRead=true>; } @@ -3205,8 +3360,12 @@ multiclass VPseudoTernaryWithTailPolicy; - def "_" # mx # "_E" # sew # "_MASK" : VPseudoTernaryMaskPolicy, + def "_" # mx # "_E" # sew : VPseudoTernaryNoMaskWithPolicy; + def "_" # mx # "_E" # sew # "_MASK" : VPseudoTernaryMaskPolicy, RISCVMaskedPseudo; } } @@ -3223,10 +3382,12 @@ multiclass VPseudoTernaryWithTailPolicyRoundingMode; + Op2Class, Constraint, + hasSEWOp=0>; def "_" # mx # "_E" # sew # "_MASK" : VPseudoTernaryMaskPolicyRoundingMode, + Op2Class, Constraint, + hasSEWOp=0>, RISCVMaskedPseudo; } } @@ -3254,18 +3415,21 @@ multiclass VPseudoTernaryWithPolicyRoundingMode { - let VLMul = MInfo.value in { + let VLMul = MInfo.value, SEW = sew in { defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX); + defvar hasSEWOp = !eq(sew, 0); let isCommutable = Commutable in def suffix : VPseudoTernaryNoMaskWithPolicyRoundingMode; + TargetConstraintType, + hasSEWOp=hasSEWOp>; def suffix # "_MASK" : VPseudoBinaryMaskPolicyRoundingMode, + TargetConstraintType=TargetConstraintType, + hasSEWOp=hasSEWOp>, RISCVMaskedPseudo; } } @@ -3596,10 +3760,14 @@ multiclass VPseudoConversion { defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX); + defvar hasSEWOp = !eq(sew, 0); let VLMul = MInfo.value, SEW=sew in { - def suffix : VPseudoUnaryNoMask; - def suffix # "_MASK" : VPseudoUnaryMask, + def suffix : VPseudoUnaryNoMask; + def suffix # "_MASK" : VPseudoUnaryMask, RISCVMaskedPseudo; } } @@ -3612,10 +3780,14 @@ multiclass VPseudoConversionRoundingMode { let VLMul = MInfo.value, SEW=sew in { defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # 
sew, "_" # MInfo.MX); - def suffix : VPseudoUnaryNoMaskRoundingMode; + defvar hasSEWOp = !eq(sew, 0); + def suffix : VPseudoUnaryNoMaskRoundingMode; def suffix # "_MASK" : VPseudoUnaryMaskRoundingMode, + TargetConstraintType, + hasSEWOp=hasSEWOp>, RISCVMaskedPseudo; } } @@ -3629,10 +3801,13 @@ multiclass VPseudoConversionRM { let VLMul = MInfo.value, SEW=sew in { defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX); - def suffix : VPseudoUnaryNoMask_FRM; - def suffix # "_MASK" : VPseudoUnaryMask_FRM, + defvar hasSEWOp = !eq(sew, 0); + def suffix : VPseudoUnaryNoMask_FRM; + def suffix # "_MASK" : VPseudoUnaryMask_FRM, RISCVMaskedPseudo; } } @@ -3984,13 +4159,15 @@ class VPatUnaryNoMask( - !if(isSEWAware, - inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew), - inst#"_"#kind#"_"#vlmul.MX)) - (result_type result_reg_class:$merge), - (op2_type op2_reg_class:$rs2), - GPR:$vl, log2sew, TU_MU)>; + !if(isSEWAware, + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)) + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + GPR:$vl, TU_MU), + (!cast(inst#"_"#kind#"_"#vlmul.MX) + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + GPR:$vl, log2sew, TU_MU))>; class VPatUnaryNoMaskRoundingMode( - !if(isSEWAware, - inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew), - inst#"_"#kind#"_"#vlmul.MX)) - (result_type result_reg_class:$merge), - (op2_type op2_reg_class:$rs2), - (XLenVT timm:$round), - GPR:$vl, log2sew, TU_MU)>; + !if(isSEWAware, + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)) + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (XLenVT timm:$round), + GPR:$vl, TU_MU), + (!cast(inst#"_"#kind#"_"#vlmul.MX) + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (XLenVT timm:$round), + GPR:$vl, log2sew, TU_MU))>; class VPatUnaryMask( - !if(isSEWAware, - inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", - inst#"_"#kind#"_"#vlmul.MX#"_MASK")) - (result_type result_reg_class:$merge), - (op2_type op2_reg_class:$rs2), - (mask_type V0), GPR:$vl, log2sew, (XLenVT timm:$policy))>; + !if(isSEWAware, + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK") + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type V0), GPR:$vl, (XLenVT timm:$policy)), + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_MASK") + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type V0), GPR:$vl, log2sew, (XLenVT timm:$policy)))>; class VPatUnaryMaskRoundingMode( - !if(isSEWAware, - inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", - inst#"_"#kind#"_"#vlmul.MX#"_MASK")) - (result_type result_reg_class:$merge), - (op2_type op2_reg_class:$rs2), - (mask_type V0), - (XLenVT timm:$round), - GPR:$vl, log2sew, (XLenVT timm:$policy))>; + !if(isSEWAware, + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK") + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type V0), + (XLenVT timm:$round), + GPR:$vl, (XLenVT timm:$policy)), + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_MASK") + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type V0), + (XLenVT timm:$round), + GPR:$vl, log2sew, (XLenVT timm:$policy)))>; class VPatMaskUnaryNoMask; + GPR:$vl)>; class VPatBinaryM : + DAGOperand op2_kind, + bit hasSEWOp = 1> : Pat<(result_type (!cast(intrinsic_name) (result_type result_reg_class:$merge), (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), VLOpFrag)), - (!cast(inst) - (result_type 
result_reg_class:$merge), - (op1_type op1_reg_class:$rs1), - (op2_type op2_kind:$rs2), - GPR:$vl, sew, TU_MU)>; + !if(hasSEWOp, + (!cast(inst) + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + GPR:$vl, sew, TU_MU), + (!cast(inst) + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + GPR:$vl, TU_MU))>; class VPatBinaryNoMaskRoundingMode : + DAGOperand op2_kind, + bit hasSEWOp = 1> : Pat<(result_type (!cast(intrinsic_name) (result_type (undef)), (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), (XLenVT timm:$round), VLOpFrag)), - (!cast(inst) - (result_type (IMPLICIT_DEF)), - (op1_type op1_reg_class:$rs1), - (op2_type op2_kind:$rs2), - (XLenVT timm:$round), - GPR:$vl, sew, TA_MA)>; + !if(hasSEWOp, + (!cast(inst) + (result_type (IMPLICIT_DEF)), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (XLenVT timm:$round), + GPR:$vl, sew, TA_MA), + (!cast(inst) + (result_type (IMPLICIT_DEF)), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (XLenVT timm:$round), + GPR:$vl, TA_MA))>; class VPatBinaryNoMaskTURoundingMode : + DAGOperand op2_kind, + bit hasSEWOp = 1> : Pat<(result_type (!cast(intrinsic_name) (result_type result_reg_class:$merge), (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), (XLenVT timm:$round), VLOpFrag)), - (!cast(inst) - (result_type result_reg_class:$merge), - (op1_type op1_reg_class:$rs1), - (op2_type op2_kind:$rs2), - (XLenVT timm:$round), - GPR:$vl, sew, TU_MU)>; + !if(hasSEWOp, + (!cast(inst) + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (XLenVT timm:$round), + GPR:$vl, sew, TU_MU), + (!cast(inst) + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (XLenVT timm:$round), + GPR:$vl, TU_MU))>; // Same as above but source operands are swapped. 
@@ -4244,18 +4453,25 @@ class VPatBinaryMaskTA : + DAGOperand op2_kind, + bit hasSEWOp = 1> : Pat<(result_type (!cast(intrinsic_name#"_mask") (result_type result_reg_class:$merge), (op1_type op1_reg_class:$rs1), (op2_type op2_kind:$rs2), (mask_type V0), VLOpFrag, (XLenVT timm:$policy))), - (!cast(inst#"_MASK") - (result_type result_reg_class:$merge), - (op1_type op1_reg_class:$rs1), - (op2_type op2_kind:$rs2), - (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>; + !if(hasSEWOp, + (!cast(inst#"_MASK") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy)), + (!cast(inst#"_MASK") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), GPR:$vl, (XLenVT timm:$policy)))>; class VPatBinaryMaskTARoundingMode : + DAGOperand op2_kind, + bit hasSEWOp = 1> : Pat<(result_type (!cast(intrinsic_name#"_mask") (result_type result_reg_class:$merge), (op1_type op1_reg_class:$rs1), @@ -4274,13 +4491,21 @@ class VPatBinaryMaskTARoundingMode(inst#"_MASK") - (result_type result_reg_class:$merge), - (op1_type op1_reg_class:$rs1), - (op2_type op2_kind:$rs2), - (mask_type V0), - (XLenVT timm:$round), - GPR:$vl, sew, (XLenVT timm:$policy))>; + !if(hasSEWOp, + (!cast(inst#"_MASK") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), + (XLenVT timm:$round), + GPR:$vl, sew, (XLenVT timm:$policy)), + (!cast(inst#"_MASK") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), + (XLenVT timm:$round), + GPR:$vl, (XLenVT timm:$policy)))>; // Same as above but source operands are swapped. 
class VPatBinaryMaskSwapped : + DAGOperand op2_kind, + bit hasSEWOp = 1> : Pat<(result_type (!cast(intrinsic_name) (result_type (undef)), (result_type result_reg_class:$rs1), (op2_type op2_kind:$rs2), (XLenVT timm:$round), VLOpFrag)), - (!cast(inst#"_TIED") - (result_type result_reg_class:$rs1), - (op2_type op2_kind:$rs2), - (XLenVT timm:$round), - GPR:$vl, sew, TAIL_AGNOSTIC)>; + !if(hasSEWOp, + (!cast(inst#"_TIED") + (result_type result_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (XLenVT timm:$round), + GPR:$vl, sew, TAIL_AGNOSTIC), + (!cast(inst#"_TIED") + (result_type result_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (XLenVT timm:$round), + GPR:$vl, TAIL_AGNOSTIC))>; class VPatTiedBinaryNoMaskTU : + DAGOperand op2_kind, + bit hasSEWOp = 1> : Pat<(result_type (!cast(intrinsic_name) (result_type result_reg_class:$merge), (result_type result_reg_class:$merge), (op2_type op2_kind:$rs2), (XLenVT timm:$round), VLOpFrag)), - (!cast(inst#"_TIED") - (result_type result_reg_class:$merge), - (op2_type op2_kind:$rs2), - (XLenVT timm:$round), - GPR:$vl, sew, TU_MU)>; + !if(hasSEWOp, + (!cast(inst#"_TIED") + (result_type result_reg_class:$merge), + (op2_type op2_kind:$rs2), + (XLenVT timm:$round), + GPR:$vl, sew, TU_MU), + (!cast(inst#"_TIED") + (result_type result_reg_class:$merge), + (op2_type op2_kind:$rs2), + (XLenVT timm:$round), + GPR:$vl, TU_MU))>; class VPatTiedBinaryMask : + DAGOperand op2_kind, + bit hasSEWOp = 1> : Pat<(result_type (!cast(intrinsic_name#"_mask") (result_type result_reg_class:$merge), (result_type result_reg_class:$merge), @@ -4411,12 +4651,19 @@ class VPatTiedBinaryMaskRoundingMode(inst#"_MASK_TIED") - (result_type result_reg_class:$merge), - (op2_type op2_kind:$rs2), - (mask_type V0), - (XLenVT timm:$round), - GPR:$vl, sew, (XLenVT timm:$policy))>; + !if(hasSEWOp, + (!cast(inst#"_MASK_TIED") + (result_type result_reg_class:$merge), + (op2_type op2_kind:$rs2), + (mask_type V0), + (XLenVT timm:$round), + GPR:$vl, sew, (XLenVT timm:$policy)), + (!cast(inst#"_MASK_TIED") + (result_type result_reg_class:$merge), + (op2_type op2_kind:$rs2), + (mask_type V0), + (XLenVT timm:$round), + GPR:$vl, (XLenVT timm:$policy)))>; class VPatTernaryNoMask; + GPR:$vl, TAIL_AGNOSTIC)>; class VPatTernaryNoMaskTARoundingMode; + GPR:$vl, TAIL_AGNOSTIC)>; class VPatTernaryNoMaskWithPolicy(!if(isSEWAware, - inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew), - inst#"_"#kind#"_"#vlmul.MX)) - result_reg_class:$rs3, - (op1_type op1_reg_class:$rs1), - op2_kind:$rs2, - (XLenVT timm:$round), - GPR:$vl, log2sew, (XLenVT timm:$policy))>; + !if(isSEWAware, + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)) + result_reg_class:$rs3, + (op1_type op1_reg_class:$rs1), + op2_kind:$rs2, + (XLenVT timm:$round), + GPR:$vl, (XLenVT timm:$policy)), + (!cast(inst#"_"#kind#"_"#vlmul.MX) + result_reg_class:$rs3, + (op1_type op1_reg_class:$rs1), + op2_kind:$rs2, + (XLenVT timm:$round), + GPR:$vl, log2sew, (XLenVT timm:$policy)))>; class VPatTernaryMask(!if(isSEWAware, - inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew) # "_MASK", - inst#"_"#kind#"_"#vlmul.MX # "_MASK")) - result_reg_class:$rs3, - (op1_type op1_reg_class:$rs1), - op2_kind:$rs2, - (mask_type V0), - (XLenVT timm:$round), - GPR:$vl, log2sew, (XLenVT timm:$policy))>; + !if(isSEWAware, + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew) # "_MASK") + result_reg_class:$rs3, + (op1_type op1_reg_class:$rs1), + op2_kind:$rs2, + (mask_type V0), + (XLenVT timm:$round), + GPR:$vl, (XLenVT timm:$policy)), + (!cast(inst#"_"#kind#"_"#vlmul.MX # "_MASK") 
+ result_reg_class:$rs3, + (op1_type op1_reg_class:$rs1), + op2_kind:$rs2, + (mask_type V0), + (XLenVT timm:$round), + GPR:$vl, log2sew, (XLenVT timm:$policy)))>; class VPatTernaryMaskTA; + GPR:$vl, TAIL_AGNOSTIC)>; class VPatTernaryMaskTARoundingMode; + GPR:$vl, TAIL_AGNOSTIC)>; multiclass VPatUnaryS_M { @@ -4807,12 +5065,13 @@ multiclass VPatBinary { + DAGOperand op2_kind, + bit hasSEWOp = 1> { def : VPatBinaryNoMaskTU; + sew, result_reg_class, op1_reg_class, op2_kind, hasSEWOp>; def : VPatBinaryMaskTA; + op2_kind, hasSEWOp>; } multiclass VPatBinaryRoundingMode { + DAGOperand op2_kind, + bit hasSEWOp = 1> { def : VPatBinaryNoMaskRoundingMode; + sew, op1_reg_class, op2_kind, hasSEWOp>; def : VPatBinaryNoMaskTURoundingMode; + sew, result_reg_class, op1_reg_class, op2_kind, hasSEWOp>; def : VPatBinaryMaskTARoundingMode; + op2_kind, hasSEWOp>; } multiclass VPatBinarySwapped; + vti.RegClass, vti.RegClass, + hasSEWOp=!not(isSEWAware)>; } multiclass VPatBinaryV_VV_RM; + vti.RegClass, vti.RegClass, + hasSEWOp=!not(isSEWAware)>; } multiclass VPatBinaryV_VV_INT; + vti.RegClass, vti.RegClass, + hasSEWOp=0>; } } @@ -5011,7 +5274,8 @@ multiclass VPatBinaryV_VV_INT_EEW; + vti.RegClass, ivti.RegClass, + hasSEWOp=0>; } } } @@ -5027,7 +5291,8 @@ multiclass VPatBinaryV_VX; + vti.RegClass, vti.ScalarRegClass, + hasSEWOp=!not(isSEWAware)>; } } @@ -5042,7 +5307,8 @@ multiclass VPatBinaryV_VX_RM; + vti.RegClass, vti.ScalarRegClass, + hasSEWOp=!not(isSEWAware)>; } } @@ -5113,7 +5379,8 @@ multiclass VPatBinaryW_VV_RM; + Vti.RegClass, Vti.RegClass, + hasSEWOp=!not(isSEWAware)>; } } @@ -5146,7 +5413,8 @@ multiclass VPatBinaryW_VX_RM; + Vti.RegClass, Vti.ScalarRegClass, + hasSEWOp=!not(isSEWAware)>; } } @@ -5187,26 +5455,32 @@ multiclass VPatBinaryW_WV_RM.Predicates, GetVTypePredicates.Predicates) in { def : VPatTiedBinaryNoMaskRoundingMode; + Vti.Log2SEW, Wti.RegClass, Vti.RegClass, + hasSEWOp=hasSEWOp>; def : VPatBinaryNoMaskTURoundingMode; + Wti.RegClass, Wti.RegClass, Vti.RegClass, + hasSEWOp=hasSEWOp>; let AddedComplexity = 1 in { def : VPatTiedBinaryNoMaskTURoundingMode; + Vti.Log2SEW, Wti.RegClass, Vti.RegClass, + hasSEWOp=hasSEWOp>; def : VPatTiedBinaryMaskRoundingMode; + Vti.Log2SEW, Wti.RegClass, Vti.RegClass, + hasSEWOp=hasSEWOp>; } def : VPatBinaryMaskTARoundingMode; + Wti.RegClass, Vti.RegClass, + hasSEWOp=hasSEWOp>; } } } @@ -5240,7 +5514,8 @@ multiclass VPatBinaryW_WX_RM; + Wti.RegClass, Vti.ScalarRegClass, + hasSEWOp=!not(isSEWAware)>; } } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td index b4af83a3cbf67..8a3fe0ff908ce 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td @@ -34,11 +34,10 @@ multiclass VPatUSLoadStoreSDNode("PseudoVSE"#sew#"_V_"#vlmul.MX); // Load def : Pat<(type (load GPR:$rs1)), - (load_instr (type (IMPLICIT_DEF)), GPR:$rs1, avl, - log2sew, TA_MA)>; + (load_instr (type (IMPLICIT_DEF)), GPR:$rs1, avl, TA_MA)>; // Store def : Pat<(store type:$rs2, GPR:$rs1), - (store_instr reg_class:$rs2, GPR:$rs1, avl, log2sew)>; + (store_instr reg_class:$rs2, GPR:$rs1, avl)>; } multiclass VPatUSLoadStoreWholeVRSDNode( - !if(isSEWAware, - instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew), - instruction_name#"_VV_"# vlmul.MX)) + !if(isSEWAware, + (!cast(instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew)) (result_type (IMPLICIT_DEF)), op_reg_class:$rs1, op_reg_class:$rs2, - avl, log2sew, TA_MA)>; + avl, TA_MA), + (!cast(instruction_name#"_VV_"# 
vlmul.MX) + (result_type (IMPLICIT_DEF)), + op_reg_class:$rs1, + op_reg_class:$rs2, + avl, log2sew, TA_MA))>; class VPatBinarySDNode_VV_RM( - !if(isSEWAware, - instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew), - instruction_name#"_VV_"# vlmul.MX)) - (result_type (IMPLICIT_DEF)), - op_reg_class:$rs1, - op_reg_class:$rs2, - // Value to indicate no rounding mode change in - // RISCVInsertReadWriteCSR - FRM_DYN, - avl, log2sew, TA_MA)>; + !if(isSEWAware, + (!cast(instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew)) + (result_type (IMPLICIT_DEF)), + op_reg_class:$rs1, + op_reg_class:$rs2, + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + avl, TA_MA), + (!cast(instruction_name#"_VV_"# vlmul.MX) + (result_type (IMPLICIT_DEF)), + op_reg_class:$rs1, + op_reg_class:$rs2, + FRM_DYN, + avl, log2sew, TA_MA))>; class VPatBinarySDNode_XI( - !if(isSEWAware, - instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew), - instruction_name#_#suffix#_# vlmul.MX)) - (result_type (IMPLICIT_DEF)), - vop_reg_class:$rs1, - xop_kind:$rs2, - avl, log2sew, TA_MA)>; + !if(isSEWAware, + (!cast(instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew)) + (result_type (IMPLICIT_DEF)), + vop_reg_class:$rs1, + xop_kind:$rs2, + avl, TA_MA), + (!cast(instruction_name#_#suffix#_# vlmul.MX) + (result_type (IMPLICIT_DEF)), + vop_reg_class:$rs1, + xop_kind:$rs2, + avl, log2sew, TA_MA))>; multiclass VPatBinarySDNode_VV_VX vtilist = AllIntegerVectors, @@ -182,14 +191,17 @@ class VPatBinarySDNode_VF : Pat<(result_type (vop (vop_type vop_reg_class:$rs1), (vop_type (SplatFPOp xop_kind:$rs2)))), - (!cast( - !if(isSEWAware, - instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew), - instruction_name#"_"#vlmul.MX)) - (result_type (IMPLICIT_DEF)), - vop_reg_class:$rs1, - (xop_type xop_kind:$rs2), - avl, log2sew, TA_MA)>; + !if(isSEWAware, + (!cast(instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)) + (result_type (IMPLICIT_DEF)), + vop_reg_class:$rs1, + (xop_type xop_kind:$rs2), + avl, TA_MA), + (!cast(instruction_name#"_"#vlmul.MX) + (result_type (IMPLICIT_DEF)), + vop_reg_class:$rs1, + (xop_type xop_kind:$rs2), + avl, log2sew, TA_MA))>; class VPatBinarySDNode_VF_RM : Pat<(result_type (vop (vop_type vop_reg_class:$rs1), (vop_type (SplatFPOp xop_kind:$rs2)))), - (!cast( - !if(isSEWAware, - instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew), - instruction_name#"_"#vlmul.MX)) - (result_type (IMPLICIT_DEF)), - vop_reg_class:$rs1, - (xop_type xop_kind:$rs2), - // Value to indicate no rounding mode change in - // RISCVInsertReadWriteCSR - FRM_DYN, - avl, log2sew, TA_MA)>; + !if(isSEWAware, + (!cast(instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)) + (result_type (IMPLICIT_DEF)), + vop_reg_class:$rs1, + (xop_type xop_kind:$rs2), + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + avl, TA_MA), + (!cast(instruction_name#"_"#vlmul.MX) + (result_type (IMPLICIT_DEF)), + vop_reg_class:$rs1, + (xop_type xop_kind:$rs2), + FRM_DYN, + avl, log2sew, TA_MA))>; multiclass VPatBinaryFPSDNode_VV_VF { @@ -252,14 +268,17 @@ multiclass VPatBinaryFPSDNode_R_VF.Predicates in def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)), (fvti.Vector fvti.RegClass:$rs1))), - (!cast( - !if(isSEWAware, - instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW, - instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)) - (fvti.Vector (IMPLICIT_DEF)), - fvti.RegClass:$rs1, - (fvti.Scalar fvti.ScalarRegClass:$rs2), - fvti.AVL, fvti.Log2SEW, 
TA_MA)>; + !if(isSEWAware, + (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW) + (fvti.Vector (IMPLICIT_DEF)), + fvti.RegClass:$rs1, + (fvti.Scalar fvti.ScalarRegClass:$rs2), + fvti.AVL, TA_MA), + (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX) + (fvti.Vector (IMPLICIT_DEF)), + fvti.RegClass:$rs1, + (fvti.Scalar fvti.ScalarRegClass:$rs2), + fvti.AVL, fvti.Log2SEW, TA_MA))>; } multiclass VPatBinaryFPSDNode_R_VF_RM.Predicates in def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)), (fvti.Vector fvti.RegClass:$rs1))), - (!cast( - !if(isSEWAware, - instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW, - instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)) - (fvti.Vector (IMPLICIT_DEF)), - fvti.RegClass:$rs1, - (fvti.Scalar fvti.ScalarRegClass:$rs2), - // Value to indicate no rounding mode change in - // RISCVInsertReadWriteCSR - FRM_DYN, - fvti.AVL, fvti.Log2SEW, TA_MA)>; + !if(isSEWAware, + (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW) + (fvti.Vector (IMPLICIT_DEF)), + fvti.RegClass:$rs1, + (fvti.Scalar fvti.ScalarRegClass:$rs2), + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + fvti.AVL, TA_MA), + (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX) + (fvti.Vector (IMPLICIT_DEF)), + fvti.RegClass:$rs1, + (fvti.Scalar fvti.ScalarRegClass:$rs2), + FRM_DYN, + fvti.AVL, fvti.Log2SEW, TA_MA))>; } multiclass VPatIntegerSetCCSDNode_VV; + FRM_DYN, fvti.AVL, TA_MA)>; } } @@ -443,8 +465,7 @@ multiclass VPatWConvertI2FPSDNode_V(instruction_name#"_"#ivti.LMul.MX#"_E"#ivti.SEW) (fwti.Vector (IMPLICIT_DEF)), - ivti.RegClass:$rs1, - ivti.AVL, ivti.Log2SEW, TA_MA)>; + ivti.RegClass:$rs1, ivti.AVL, TA_MA)>; } } @@ -475,8 +496,7 @@ multiclass VPatNConvertI2FPSDNode_W_RM; + FRM_DYN, fvti.AVL, TA_MA)>; } } @@ -624,10 +644,9 @@ multiclass VPatWidenBinaryFPSDNode_VV_VF_RM (!cast(instruction_name#"_VV_"#vti.LMul.MX#"_E"#vti.SEW) (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, vti.RegClass:$rs1, - // Value to indicate no rounding mode change in - // RISCVInsertReadWriteCSR - FRM_DYN, - vti.AVL, vti.Log2SEW, TA_MA)>; + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, vti.AVL, TA_MA)>; def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue))), @@ -639,8 +658,7 @@ multiclass VPatWidenBinaryFPSDNode_VV_VF_RM vti.ScalarRegClass:$rs1, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - vti.AVL, vti.Log2SEW, TA_MA)>; + FRM_DYN, vti.AVL, TA_MA)>; def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue))), @@ -650,8 +668,7 @@ multiclass VPatWidenBinaryFPSDNode_VV_VF_RM vti.ScalarRegClass:$rs1, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - vti.AVL, vti.Log2SEW, TA_MA)>; + FRM_DYN, vti.AVL, TA_MA)>; } } } @@ -670,9 +687,7 @@ multiclass VPatWidenBinaryFPSDNode_WV_WF_RM wti.RegClass:$rs2, vti.RegClass:$rs1, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - vti.AVL, vti.Log2SEW, - TAIL_AGNOSTIC)>; + FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>; def : Pat<(op (wti.Vector wti.RegClass:$rs2), (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)), @@ -682,8 +697,7 @@ multiclass VPatWidenBinaryFPSDNode_WV_WF_RM vti.ScalarRegClass:$rs1, // Value to indicate 
no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - vti.AVL, vti.Log2SEW, TA_MA)>; + FRM_DYN, vti.AVL, TA_MA)>; def : Pat<(op (wti.Vector wti.RegClass:$rs2), (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))), (!cast(instruction_name#"_W"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_E"#vti.SEW) @@ -691,8 +705,7 @@ multiclass VPatWidenBinaryFPSDNode_WV_WF_RM vti.ScalarRegClass:$rs1, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - vti.AVL, vti.Log2SEW, TA_MA)>; + FRM_DYN, vti.AVL, TA_MA)>; } } } @@ -720,8 +733,7 @@ multiclass VPatWidenFPMulAccSDNode_VV_VF_RM { wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; + FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>; def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))), (wti.Vector (riscv_fpextend_vl_oneuse @@ -732,8 +744,7 @@ multiclass VPatWidenFPMulAccSDNode_VV_VF_RM { wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; + FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>; } } } @@ -755,8 +766,7 @@ multiclass VPatWidenFPNegMulAccSDNode_VV_VF_RM { wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; + FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>; def : Pat<(fma (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))), (fneg (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), @@ -766,8 +776,7 @@ multiclass VPatWidenFPNegMulAccSDNode_VV_VF_RM { wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; + FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>; def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))), (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue)), @@ -776,8 +785,7 @@ multiclass VPatWidenFPNegMulAccSDNode_VV_VF_RM { wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; + FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>; } } } @@ -799,8 +807,7 @@ multiclass VPatWidenFPMulSacSDNode_VV_VF_RM { wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; + FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>; def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))), (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue)), @@ -809,8 +816,7 @@ multiclass VPatWidenFPMulSacSDNode_VV_VF_RM { wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; + FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>; } } } @@ -832,8 +838,7 @@ multiclass VPatWidenFPNegMulSacSDNode_VV_VF_RM { wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - vti.AVL, vti.Log2SEW, 
TAIL_AGNOSTIC)>; + FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>; def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))), (fneg (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), @@ -843,8 +848,7 @@ multiclass VPatWidenFPNegMulSacSDNode_VV_VF_RM { wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; + FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>; def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))), (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask), (XLenVT srcvalue)), @@ -853,8 +857,7 @@ multiclass VPatWidenFPNegMulSacSDNode_VV_VF_RM { wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>; + FRM_DYN, vti.AVL, TAIL_AGNOSTIC)>; } } } @@ -1099,7 +1102,7 @@ foreach vtiTowti = AllWidenableIntVectors in { (vti.Mask true_mask), (XLenVT srcvalue))), (!cast("PseudoVREM_VV_"#vti.LMul.MX#"_E"#!shl(1, vti.Log2SEW)) (vti.Vector (IMPLICIT_DEF)), - vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>; + vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, TA_MA)>; } } @@ -1277,32 +1280,28 @@ foreach fvti = AllFloatVectors in { fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>; + FRM_DYN, fvti.AVL, TAIL_AGNOSTIC)>; def : Pat<(fvti.Vector (any_fma fvti.RegClass:$rs1, fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))), (!cast("PseudoVFMSUB_VV_"# suffix) fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>; + FRM_DYN, fvti.AVL, TAIL_AGNOSTIC)>; def : Pat<(fvti.Vector (any_fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))), (!cast("PseudoVFNMADD_VV_"# suffix) fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>; + FRM_DYN, fvti.AVL, TAIL_AGNOSTIC)>; def : Pat<(fvti.Vector (any_fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd, fvti.RegClass:$rs2)), (!cast("PseudoVFNMSUB_VV_"# suffix) fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>; + FRM_DYN, fvti.AVL, TAIL_AGNOSTIC)>; // The choice of VFMADD here is arbitrary, vfmadd.vf and vfmacc.vf are equally // commutable. 
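Note (editorial aside, not part of the patch): the SEW-aware pseudos selected in the patterns above encode the element width in the opcode name (the "_E" suffix built from !shl(1, log2sew) or fvti.SEW), which is why their trailing Log2SEW operand is dropped while the non-SEW-aware forms keep it. A minimal standalone C++ sketch of that log2-SEW-to-name-suffix mapping, for illustration only; the sewSuffix helper below is hypothetical and not an LLVM API:

#include <cassert>
#include <string>

// Mirrors the "_E" # !shl(1, log2sew) suffix used by the SEW-aware pseudo
// names in these TableGen patterns (illustrative sketch, not LLVM code).
static std::string sewSuffix(unsigned Log2SEW) {
  assert(Log2SEW >= 3 && Log2SEW <= 6 && "SEW is e8..e64");
  return "_E" + std::to_string(1u << Log2SEW); // e.g. 6 -> "_E64"
}

int main() {
  assert(sewSuffix(3) == "_E8");  // e8
  assert(sewSuffix(5) == "_E32"); // e32
  assert(sewSuffix(6) == "_E64"); // e64
  return 0;
}

Since the width is recoverable from the mnemonic for SEW-aware pseudos (and from the explicit operand otherwise), the MIR and IR test updates later in this patch simply delete the now-redundant SEW immediates such as 6 /* e64 */ and 5 /* e32 */ from the expected operand lists.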
@@ -1312,16 +1311,14 @@ foreach fvti = AllFloatVectors in { fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>; + FRM_DYN, fvti.AVL, TAIL_AGNOSTIC)>; def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1), fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))), (!cast("PseudoVFMSUB_V" # fvti.ScalarSuffix # "_" # suffix) fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>; + FRM_DYN, fvti.AVL, TAIL_AGNOSTIC)>; def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1), (fneg fvti.RegClass:$rd), (fneg fvti.RegClass:$rs2))), @@ -1329,16 +1326,14 @@ foreach fvti = AllFloatVectors in { fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>; + FRM_DYN, fvti.AVL, TAIL_AGNOSTIC)>; def : Pat<(fvti.Vector (any_fma (SplatFPOp fvti.ScalarRegClass:$rs1), (fneg fvti.RegClass:$rd), fvti.RegClass:$rs2)), (!cast("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix) fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>; + FRM_DYN, fvti.AVL, TAIL_AGNOSTIC)>; // The splat might be negated. def : Pat<(fvti.Vector (any_fma (fneg (SplatFPOp fvti.ScalarRegClass:$rs1)), @@ -1347,16 +1342,14 @@ foreach fvti = AllFloatVectors in { fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>; + FRM_DYN, fvti.AVL, TAIL_AGNOSTIC)>; def : Pat<(fvti.Vector (any_fma (fneg (SplatFPOp fvti.ScalarRegClass:$rs1)), fvti.RegClass:$rd, fvti.RegClass:$rs2)), (!cast("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix) fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>; + FRM_DYN, fvti.AVL, TAIL_AGNOSTIC)>; } } @@ -1375,41 +1368,40 @@ foreach vti = AllFloatVectors in { vti.RegClass:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - vti.AVL, vti.Log2SEW, TA_MA)>; + FRM_DYN, vti.AVL, TA_MA)>; // 13.12. Vector Floating-Point Sign-Injection Instructions def : Pat<(fabs (vti.Vector vti.RegClass:$rs)), (!cast("PseudoVFSGNJX_VV_"# vti.LMul.MX#"_E"#vti.SEW) (vti.Vector (IMPLICIT_DEF)), - vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TA_MA)>; + vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, TA_MA)>; // Handle fneg with VFSGNJN using the same input for both operands. 
def : Pat<(fneg (vti.Vector vti.RegClass:$rs)), (!cast("PseudoVFSGNJN_VV_"# vti.LMul.MX#"_E"#vti.SEW) (vti.Vector (IMPLICIT_DEF)), - vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TA_MA)>; + vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, TA_MA)>; def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1), (vti.Vector vti.RegClass:$rs2))), (!cast("PseudoVFSGNJ_VV_"# vti.LMul.MX#"_E"#vti.SEW) (vti.Vector (IMPLICIT_DEF)), - vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>; + vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, TA_MA)>; def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1), (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs2)))), (!cast("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_E"#vti.SEW) (vti.Vector (IMPLICIT_DEF)), - vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>; + vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, TA_MA)>; def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1), (vti.Vector (fneg vti.RegClass:$rs2)))), (!cast("PseudoVFSGNJN_VV_"# vti.LMul.MX#"_E"#vti.SEW) (vti.Vector (IMPLICIT_DEF)), - vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>; + vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, TA_MA)>; def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1), (vti.Vector (fneg (SplatFPOp vti.ScalarRegClass:$rs2))))), (!cast("PseudoVFSGNJN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_E"#vti.SEW) (vti.Vector (IMPLICIT_DEF)), - vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>; + vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, TA_MA)>; } } @@ -1491,8 +1483,7 @@ foreach fvtiToFWti = AllWidenableFloatVectors in { fwti.RegClass:$rs1, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - fvti.AVL, fvti.Log2SEW, TA_MA)>; + FRM_DYN, fvti.AVL, TA_MA)>; } //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td index 6c6ecb604fd03..0722d5e4244f3 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -624,14 +624,17 @@ class VPatBinaryVL_V( - !if(isSEWAware, - instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", - instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK")) - result_reg_class:$merge, - op1_reg_class:$rs1, - op2_reg_class:$rs2, - (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; + !if(isSEWAware, + (!cast(instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK") + result_reg_class:$merge, + op1_reg_class:$rs1, + op2_reg_class:$rs2, + (mask_type V0), GPR:$vl, TAIL_AGNOSTIC), + (!cast(instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK") + result_reg_class:$merge, + op1_reg_class:$rs1, + op2_reg_class:$rs2, + (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC))>; class VPatBinaryVL_V_RM( - !if(isSEWAware, - instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", - instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK")) - result_reg_class:$merge, - op1_reg_class:$rs1, - op2_reg_class:$rs2, - (mask_type V0), - // Value to indicate no rounding mode change in - // RISCVInsertReadWriteCSR - FRM_DYN, - GPR:$vl, log2sew, TAIL_AGNOSTIC)>; + !if(isSEWAware, + (!cast(instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK") + result_reg_class:$merge, + op1_reg_class:$rs1, + op2_reg_class:$rs2, + (mask_type V0), + // Value to indicate no rounding mode change in + // 
RISCVInsertReadWriteCSR + FRM_DYN, + GPR:$vl, TAIL_AGNOSTIC), + (!cast(instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK") + result_reg_class:$merge, + op1_reg_class:$rs1, + op2_reg_class:$rs2, + (mask_type V0), + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + GPR:$vl, log2sew, TAIL_AGNOSTIC))>; multiclass VPatTiedBinaryNoMaskVL_V(name) - result_reg_class:$rs1, - op2_reg_class:$rs2, - // Value to indicate no rounding mode change in - // RISCVInsertReadWriteCSR - FRM_DYN, - GPR:$vl, log2sew, TAIL_AGNOSTIC)>; + !if(isSEWAware, + (!cast(name) + result_reg_class:$rs1, + op2_reg_class:$rs2, + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, GPR:$vl, TAIL_AGNOSTIC), + (!cast(name) + result_reg_class:$rs1, + op2_reg_class:$rs2, + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, GPR:$vl, log2sew, TAIL_AGNOSTIC))>; // Tail undisturbed def : Pat<(riscv_vmerge_vl true_mask, (result_type (vop @@ -755,13 +771,19 @@ multiclass VPatTiedBinaryNoMaskVL_V_RM(name) - result_reg_class:$rs1, - op2_reg_class:$rs2, - // Value to indicate no rounding mode change in - // RISCVInsertReadWriteCSR - FRM_DYN, - GPR:$vl, log2sew, TU_MU)>; + !if(isSEWAware, + (!cast(name) + result_reg_class:$rs1, + op2_reg_class:$rs2, + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, GPR:$vl, TU_MU), + (!cast(name) + result_reg_class:$rs1, + op2_reg_class:$rs2, + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, GPR:$vl, log2sew, TU_MU))>; } class VPatBinaryVL_XI( - !if(isSEWAware, - instruction_name#_#suffix#_#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", - instruction_name#_#suffix#_#vlmul.MX#"_MASK")) - result_reg_class:$merge, - vop_reg_class:$rs1, - xop_kind:$rs2, - (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; + !if(isSEWAware, + (!cast(instruction_name#_#suffix#_#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK") + result_reg_class:$merge, + vop_reg_class:$rs1, + xop_kind:$rs2, + (mask_type V0), GPR:$vl, TAIL_AGNOSTIC), + (!cast(instruction_name#_#suffix#_#vlmul.MX#"_MASK") + result_reg_class:$merge, + vop_reg_class:$rs1, + xop_kind:$rs2, + (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC))>; multiclass VPatBinaryVL_VV_VX vtilist = AllIntegerVectors, @@ -908,14 +933,17 @@ class VPatBinaryVL_VF( - !if(isSEWAware, - instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", - instruction_name#"_"#vlmul.MX#"_MASK")) - result_reg_class:$merge, - vop_reg_class:$rs1, - scalar_reg_class:$rs2, - (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; + !if(isSEWAware, + (!cast(instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK") + result_reg_class:$merge, + vop_reg_class:$rs1, + scalar_reg_class:$rs2, + (mask_type V0), GPR:$vl, TAIL_AGNOSTIC), + (!cast(instruction_name#"_"#vlmul.MX#"_MASK") + result_reg_class:$merge, + vop_reg_class:$rs1, + scalar_reg_class:$rs2, + (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC))>; class VPatBinaryVL_VF_RM( - !if(isSEWAware, - instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", - instruction_name#"_"#vlmul.MX#"_MASK")) - result_reg_class:$merge, - vop_reg_class:$rs1, - scalar_reg_class:$rs2, - (mask_type V0), - // Value to indicate no rounding mode change in - // RISCVInsertReadWriteCSR - FRM_DYN, - GPR:$vl, log2sew, TAIL_AGNOSTIC)>; + !if(isSEWAware, + (!cast(instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK") + result_reg_class:$merge, + vop_reg_class:$rs1, + scalar_reg_class:$rs2, + (mask_type 
V0), + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + GPR:$vl, TAIL_AGNOSTIC), + (!cast(instruction_name#"_"#vlmul.MX#"_MASK") + result_reg_class:$merge, + vop_reg_class:$rs1, + scalar_reg_class:$rs2, + (mask_type V0), + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + GPR:$vl, log2sew, TAIL_AGNOSTIC))>; multiclass VPatBinaryFPVL_VV_VF { @@ -988,13 +1023,15 @@ multiclass VPatBinaryFPVL_R_VF( - !if(isSEWAware, - instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK", - instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")) - fvti.RegClass:$merge, - fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, - (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; + !if(isSEWAware, + (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") + fvti.RegClass:$merge, + fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, + (fvti.Mask V0), GPR:$vl, TAIL_AGNOSTIC), + (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") + fvti.RegClass:$merge, + fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, + (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC))>; } } @@ -1007,17 +1044,23 @@ multiclass VPatBinaryFPVL_R_VF_RM( - !if(isSEWAware, - instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK", - instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")) - fvti.RegClass:$merge, - fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, - (fvti.Mask V0), - // Value to indicate no rounding mode change in - // RISCVInsertReadWriteCSR - FRM_DYN, - GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; + !if(isSEWAware, + (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") + fvti.RegClass:$merge, + fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, + (fvti.Mask V0), + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + GPR:$vl, TAIL_AGNOSTIC), + (!cast(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") + fvti.RegClass:$merge, + fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, + (fvti.Mask V0), + // Value to indicate no rounding mode change in + // RISCVInsertReadWriteCSR + FRM_DYN, + GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC))>; } } @@ -1234,8 +1277,7 @@ multiclass VPatConvertI2FPVL_V_RM; + FRM_DYN, GPR:$vl, TA_MA)>; } } @@ -1249,7 +1291,7 @@ multiclass VPatConvertI2FP_RM_VL_V { VLOpFrag)), (!cast(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, - (ivti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; + (ivti.Mask V0), timm:$frm, GPR:$vl, TA_MA)>; } } @@ -1318,7 +1360,7 @@ multiclass VPatWConvertI2FPVL_V(instruction_name#"_"#ivti.LMul.MX#"_E"#ivti.SEW#"_MASK") (fwti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, (ivti.Mask V0), - GPR:$vl, ivti.Log2SEW, TA_MA)>; + GPR:$vl, TA_MA)>; } } @@ -1394,8 +1436,7 @@ multiclass VPatNConvertI2FPVL_W_RM; + FRM_DYN, GPR:$vl, TA_MA)>; } } @@ -1410,7 +1451,7 @@ multiclass VPatNConvertI2FP_RM_VL_W { VLOpFrag)), (!cast(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1, - (iwti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; + (iwti.Mask V0), timm:$frm, GPR:$vl, TA_MA)>; } } @@ -1426,7 +1467,7 @@ multiclass VPatReductionVL { (vti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), (vti_m1.Vector VR:$rs2), - (vti.Mask V0), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; + (vti.Mask V0), GPR:$vl, (XLenVT timm:$policy))>; } } } @@ -1446,8 
+1487,7 @@ multiclass VPatReductionVL_RM (vti.Mask V0), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; + FRM_DYN, GPR:$vl, (XLenVT timm:$policy))>; } } } @@ -1506,7 +1546,7 @@ multiclass VPatWidenReductionVL(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), - (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW, + (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, (XLenVT timm:$policy))>; } } @@ -1529,7 +1569,7 @@ multiclass VPatWidenReductionVL_RM; } } @@ -1548,7 +1588,7 @@ multiclass VPatWidenReductionVL_Ext_VL(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), - (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW, + (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, (XLenVT timm:$policy))>; } } @@ -1570,8 +1610,7 @@ multiclass VPatWidenReductionVL_Ext_VL_RM; } } @@ -1860,8 +1899,7 @@ multiclass VPatFPMulAddVL_VV_VF_RM; + FRM_DYN, GPR:$vl, TA_MA)>; def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rd, vti.RegClass:$rs2, @@ -1872,8 +1910,7 @@ multiclass VPatFPMulAddVL_VV_VF_RM; + FRM_DYN, GPR:$vl, TA_MA)>; } } } @@ -1927,8 +1964,7 @@ multiclass VPatFPMulAccVL_VV_VF_RM { (vti.Mask V0), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - GPR:$vl, vti.Log2SEW, TU_MU)>; + FRM_DYN, GPR:$vl, TU_MU)>; def : Pat<(riscv_vmerge_vl (vti.Mask V0), (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2, vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), @@ -1938,8 +1974,7 @@ multiclass VPatFPMulAccVL_VV_VF_RM { (vti.Mask V0), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - GPR:$vl, vti.Log2SEW, TU_MU)>; + FRM_DYN, GPR:$vl, TU_MU)>; def : Pat<(riscv_vmerge_vl (vti.Mask V0), (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2, vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), @@ -1949,8 +1984,7 @@ multiclass VPatFPMulAccVL_VV_VF_RM { (vti.Mask V0), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; + FRM_DYN, GPR:$vl, TAIL_AGNOSTIC)>; def : Pat<(riscv_vmerge_vl (vti.Mask V0), (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2, vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)), @@ -1960,8 +1994,7 @@ multiclass VPatFPMulAccVL_VV_VF_RM { (vti.Mask V0), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; + FRM_DYN, GPR:$vl, TAIL_AGNOSTIC)>; } } } @@ -2006,8 +2039,7 @@ multiclass VPatWidenFPMulAccVL_VV_VF_RM { (vti.Mask V0), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - GPR:$vl, vti.Log2SEW, TA_MA)>; + FRM_DYN, GPR:$vl, TA_MA)>; def : Pat<(vop (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)), (vti.Vector vti.RegClass:$rs2), (wti.Vector wti.RegClass:$rd), (vti.Mask V0), @@ -2017,8 +2049,7 @@ multiclass VPatWidenFPMulAccVL_VV_VF_RM { (vti.Mask V0), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - GPR:$vl, vti.Log2SEW, TA_MA)>; + FRM_DYN, GPR:$vl, TA_MA)>; } } } @@ -2491,23 +2522,20 @@ foreach vti = AllFloatVectors in { (vti.Mask V0), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - GPR:$vl, vti.Log2SEW, TA_MA)>; + FRM_DYN, GPR:$vl, TA_MA)>; // 13.12. 
Vector Floating-Point Sign-Injection Instructions def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0), VLOpFrag), (!cast("PseudoVFSGNJX_VV_"# vti.LMul.MX #"_E"#vti.SEW#"_MASK") (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs, - vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW, - TA_MA)>; + vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, TA_MA)>; // Handle fneg with VFSGNJN using the same input for both operands. def : Pat<(riscv_fneg_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0), VLOpFrag), (!cast("PseudoVFSGNJN_VV_"# vti.LMul.MX#"_E"#vti.SEW #"_MASK") (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs, - vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW, - TA_MA)>; + vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, TA_MA)>; def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1), (vti.Vector vti.RegClass:$rs2), @@ -2516,8 +2544,7 @@ foreach vti = AllFloatVectors in { VLOpFrag), (!cast("PseudoVFSGNJ_VV_"# vti.LMul.MX#"_E"#vti.SEW#"_MASK") vti.RegClass:$merge, vti.RegClass:$rs1, - vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW, - TAIL_AGNOSTIC)>; + vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, TAIL_AGNOSTIC)>; def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1), (riscv_fneg_vl vti.RegClass:$rs2, @@ -2528,7 +2555,7 @@ foreach vti = AllFloatVectors in { VLOpFrag), (!cast("PseudoVFSGNJN_VV_"# vti.LMul.MX#"_E"#vti.SEW) (vti.Vector (IMPLICIT_DEF)), - vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>; + vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, TA_MA)>; def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1), (SplatFPOp vti.ScalarRegClass:$rs2), @@ -2537,8 +2564,7 @@ foreach vti = AllFloatVectors in { VLOpFrag), (!cast("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"# vti.LMul.MX#"_E"#vti.SEW#"_MASK") vti.RegClass:$merge, vti.RegClass:$rs1, - vti.ScalarRegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW, - TAIL_AGNOSTIC)>; + vti.ScalarRegClass:$rs2, (vti.Mask V0), GPR:$vl, TAIL_AGNOSTIC)>; // Rounding without exception to implement nearbyint. 
def : Pat<(any_riscv_vfround_noexcept_vl (vti.Vector vti.RegClass:$rs1), @@ -2666,8 +2692,7 @@ foreach fvtiToFWti = AllWidenableFloatVectors in { VLOpFrag)), (!cast("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") (fwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, - (fvti.Mask V0), - GPR:$vl, fvti.Log2SEW, TA_MA)>; + (fvti.Mask V0), GPR:$vl, TA_MA)>; } // 13.19 Narrowing Floating-Point/Integer Type-Convert Instructions @@ -2700,8 +2725,7 @@ foreach fvtiToFWti = AllWidenableFloatVectors in { (fwti.Mask V0), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR - FRM_DYN, - GPR:$vl, fvti.Log2SEW, TA_MA)>; + FRM_DYN, GPR:$vl, TA_MA)>; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in @@ -2710,7 +2734,7 @@ foreach fvtiToFWti = AllWidenableFloatVectors in { (fwti.Mask V0), VLOpFrag)), (!cast("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, - (fwti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>; + (fwti.Mask V0), GPR:$vl, TA_MA)>; } } @@ -2854,7 +2878,7 @@ foreach vti = AllIntegerVectors in { VLOpFrag)), (!cast("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK") vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1, - (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; + (vti.Mask V0), GPR:$vl, TAIL_AGNOSTIC)>; def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1, vti.RegClass:$merge, (vti.Mask V0), @@ -2889,7 +2913,7 @@ foreach vti = AllIntegerVectors in { VLOpFrag)), (!cast(inst#"_MASK") vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1, - (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; + (vti.Mask V0), GPR:$vl, TAIL_AGNOSTIC)>; } } @@ -2927,7 +2951,7 @@ foreach vti = AllFloatVectors in { VLOpFrag)), (!cast("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK") vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1, - (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; + (vti.Mask V0), GPR:$vl, TAIL_AGNOSTIC)>; def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1, vti.RegClass:$merge, (vti.Mask V0), @@ -2963,7 +2987,7 @@ foreach vti = AllFloatVectors in { VLOpFrag)), (!cast(inst#"_MASK") vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1, - (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; + (vti.Mask V0), GPR:$vl, TAIL_AGNOSTIC)>; } } diff --git a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp index 788d8f9cfc853..f5b07a0a7ff4b 100644 --- a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp +++ b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp @@ -100,10 +100,10 @@ static bool vectorPseudoHasAllNBitUsers(const MachineOperand &UserOp, const MCInstrDesc &MCID = MI.getDesc(); const uint64_t TSFlags = MCID.TSFlags; - if (!RISCVII::hasSEWOp(TSFlags)) + if (!RISCVII::hasSEW(TSFlags)) return false; assert(RISCVII::hasVLOp(TSFlags)); - const unsigned Log2SEW = MI.getOperand(RISCVII::getSEWOpNum(MCID)).getImm(); + const unsigned Log2SEW = RISCVII::getSEWOp(MI).getImm(); if (UserOp.getOperandNo() == RISCVII::getVLOpNum(MCID)) return false; diff --git a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir index a54da97d2548a..0a08358194b2d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir +++ b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir @@ -41,7 +41,7 @@ body: | ; CHECK-NEXT: $x12 = frame-setup SLLI killed $x12, 1 ; CHECK-NEXT: $x2 = frame-setup SUB $x2, killed $x12 ; CHECK-NEXT: dead $x0 = 
PseudoVSETVLI killed renamable $x11, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: renamable $v8 = PseudoVLE64_V_M1 undef renamable $v8, killed renamable $x10, $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8) + ; CHECK-NEXT: renamable $v8 = PseudoVLE64_V_M1 undef renamable $v8, killed renamable $x10, $noreg, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8) ; CHECK-NEXT: $x10 = PseudoReadVLENB ; CHECK-NEXT: $x10 = SLLI killed $x10, 1 ; CHECK-NEXT: $x10 = SUB $x8, killed $x10 @@ -58,7 +58,7 @@ body: | %1:gprnox0 = COPY $x11 %0:gpr = COPY $x10 %pt:vr = IMPLICIT_DEF - %2:vr = PseudoVLE64_V_M1 %pt, %0, %1, 6, 0 :: (load unknown-size from %ir.pa, align 8) + %2:vr = PseudoVLE64_V_M1 %pt, %0, %1, 0 :: (load unknown-size from %ir.pa, align 8) %3:gpr = ADDI %stack.2, 0 VS1R_V killed %2:vr, %3:gpr PseudoRET diff --git a/llvm/test/CodeGen/RISCV/rvv/copyprop.mir b/llvm/test/CodeGen/RISCV/rvv/copyprop.mir index eb4c8bfdd67f9..dd40005b074dc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/copyprop.mir +++ b/llvm/test/CodeGen/RISCV/rvv/copyprop.mir @@ -69,7 +69,7 @@ body: | bb.4.entry: %33:vr = PHI %31, %bb.2, %25, %bb.3 - PseudoVSE64_V_M1 killed %33, %2, 1, 6 /* e64 */ + PseudoVSE64_V_M1 killed %33, %2, 1 PseudoRET ... diff --git a/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir b/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir index 5221fa73525cc..0570bdf8590fa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir +++ b/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir @@ -128,9 +128,9 @@ body: | SD killed renamable $x13, %stack.1, 0, debug-location !8 DBG_VALUE %stack.1, $noreg, !11, !DIExpression(DW_OP_deref), debug-location !8 - PseudoVSE32_V_M1 killed renamable $v8, %stack.2, 8, 5, debug-location !DILocation(line: 5, column: 1, scope: !5) + PseudoVSE32_V_M1 killed renamable $v8, %stack.2, 8, debug-location !DILocation(line: 5, column: 1, scope: !5) DBG_VALUE %stack.2, $noreg, !12, !DIExpression(DW_OP_deref), debug-location !DILocation(line: 5, column: 1, scope: !5) - PseudoVSE32_V_M1 killed renamable $v9, %stack.3, 8, 5, debug-location !DILocation(line: 6, column: 1, scope: !5) + PseudoVSE32_V_M1 killed renamable $v9, %stack.3, 8, debug-location !DILocation(line: 6, column: 1, scope: !5) DBG_VALUE %stack.3, $noreg, !13, !DIExpression(DW_OP_deref), debug-location !DILocation(line: 6, column: 1, scope: !5) PseudoVSM_V_B64 killed renamable $v0, %stack.4, 8, 0, debug-location !DILocation(line: 2, column: 1, scope: !5) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmf.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmf.ll index a4851e9838fbf..ab9ecc0507b24 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmf.ll @@ -9,7 +9,7 @@ define <2 x double> @foo(<2 x double> %x, <2 x double> %y) { ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v9 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[PseudoVFADD_VV_M1_E64_:%[0-9]+]]:vr = nnan ninf nsz arcp contract afn reassoc nofpexcept PseudoVFADD_VV_M1_E64 $noreg, [[COPY1]], [[COPY]], 7, 2, 6 /* e64 */, 1 /* ta, mu */, implicit $frm + ; CHECK-NEXT: [[PseudoVFADD_VV_M1_E64_:%[0-9]+]]:vr = nnan ninf nsz arcp contract afn reassoc nofpexcept PseudoVFADD_VV_M1_E64 $noreg, [[COPY1]], [[COPY]], 7, 2, 1 /* ta, mu */, implicit $frm ; CHECK-NEXT: $v8 = COPY [[PseudoVFADD_VV_M1_E64_]] ; CHECK-NEXT: PseudoRET 
implicit $v8 %1 = fadd fast <2 x double> %x, %y diff --git a/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll b/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll index 5c592dd1a2d68..e5f45cb5f9cdb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll @@ -17,7 +17,7 @@ define i64 @test( %0) nounwind { ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI %stack.0.a, 0 - ; CHECK-NEXT: PseudoVSE64_V_M1 [[COPY]], killed [[ADDI]], 1, 6 /* e64 */ + ; CHECK-NEXT: PseudoVSE64_V_M1 [[COPY]], killed [[ADDI]], 1 :: (store unknown-size into %ir.b, align 8) ; CHECK-NEXT: [[LD:%[0-9]+]]:gpr = LD %stack.0.a, 0 :: (dereferenceable load (s64) from %ir.a) ; CHECK-NEXT: $x10 = COPY [[LD]] ; CHECK-NEXT: PseudoRET implicit $x10 diff --git a/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll b/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll index 292f1deb2cce8..f2004be89076a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll +++ b/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll @@ -12,7 +12,7 @@ define @vpload_nxv8i64(ptr %ptr, %m, i32 ze ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK $noreg, [[COPY2]], $v0, [[COPY]], 6 /* e64 */, 1 /* ta, mu */ :: (load unknown-size from %ir.ptr, align 64) + ; CHECK-NEXT: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK $noreg, [[COPY2]], $v0, [[COPY]], 1 /* ta, mu */ :: (load unknown-size from %ir.ptr, align 64) ; CHECK-NEXT: $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]] ; CHECK-NEXT: PseudoRET implicit $v8m8 %load = call @llvm.vp.load.nxv8i64.p0(ptr %ptr, %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll index 8457f3d2c149c..ac2a06dda7c7e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/pass-fast-math-flags-sdnode.ll @@ -15,7 +15,7 @@ define @foo( %x, @llvm.vp.fmul.nxv1f64( %x, %y, %m, i32 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/reg-coalescing.mir b/llvm/test/CodeGen/RISCV/rvv/reg-coalescing.mir index feadfc627b5c0..17cf5ac80777a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/reg-coalescing.mir +++ b/llvm/test/CodeGen/RISCV/rvv/reg-coalescing.mir @@ -11,20 +11,20 @@ body: | ; CHECK: liveins: $x10 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF - ; CHECK-NEXT: undef %1.sub_vrm2_0:vrn2m2 = PseudoVLE32_V_M2 %pt, $x10, 1, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: undef %1.sub_vrm2_0:vrn2m2 = PseudoVLE32_V_M2 %pt, $x10, 1, 0 /* tu, mu */ ; CHECK-NEXT: %pt2:vrm2 = IMPLICIT_DEF - ; CHECK-NEXT: %1.sub_vrm2_1:vrn2m2 = PseudoVLE32_V_M2 %pt2, $x10, 1, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: %1.sub_vrm2_1:vrn2m2 = PseudoVLE32_V_M2 %pt2, $x10, 1, 0 /* tu, mu */ ; CHECK-NEXT: %pt3:vrm2 = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt3, $x10, 1, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt3, $x10, 1, 0 /* tu, mu */ ; CHECK-NEXT: undef early-clobber %5.sub_vrm2_0:vrn2m2 = PseudoVRGATHER_VI_M2 undef %5.sub_vrm2_0, %1.sub_vrm2_0, 0, 1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: %5.sub_vrm2_1:vrn2m2 = COPY %1.sub_vrm2_1 ; CHECK-NEXT: PseudoVSUXSEG2EI32_V_M2_M2 %5, $x10, [[PseudoVLE32_V_M2_]], 1, 5 /* e32 */, 
implicit $vl, implicit $vtype %pt:vrm2 = IMPLICIT_DEF - undef %0.sub_vrm2_0:vrn2m2 = PseudoVLE32_V_M2 %pt, $x10, 1, 5, 0 + undef %0.sub_vrm2_0:vrn2m2 = PseudoVLE32_V_M2 %pt, $x10, 1, 0 %pt2:vrm2 = IMPLICIT_DEF - %0.sub_vrm2_1:vrn2m2 = PseudoVLE32_V_M2 %pt2, $x10, 1, 5, 0 + %0.sub_vrm2_1:vrn2m2 = PseudoVLE32_V_M2 %pt2, $x10, 1, 0 %pt3:vrm2 = IMPLICIT_DEF - %1:vrm2 = PseudoVLE32_V_M2 %pt3, $x10, 1, 5, 0 + %1:vrm2 = PseudoVLE32_V_M2 %pt3, $x10, 1, 0 undef early-clobber %2.sub_vrm2_0:vrn2m2 = PseudoVRGATHER_VI_M2 undef %2.sub_vrm2_0, %0.sub_vrm2_0:vrn2m2, 0, 1, 5, 0, implicit $vl, implicit $vtype %2.sub_vrm2_1:vrn2m2 = COPY %0.sub_vrm2_1:vrn2m2 PseudoVSUXSEG2EI32_V_M2_M2 %2:vrn2m2, $x10, %1:vrm2, 1, 5, implicit $vl, implicit $vtype diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll index 31fd5bdbd31fd..4c80d554893f9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll @@ -16,7 +16,7 @@ define void @vpmerge_vpload_store( %passthru, ptr %p, ) into %ir.p) ; CHECK-NEXT: PseudoRET %a = call @llvm.vp.load.nxv2i32.p0(ptr %p, splat (i1 -1), i32 %vl) @@ -35,7 +35,7 @@ define void @vpselect_vpload_store( %passthru, ptr %p, ) into %ir.p) ; CHECK-NEXT: PseudoRET %a = call @llvm.vp.load.nxv2i32.p0(ptr %p, splat (i1 -1), i32 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll index a8934bb25571c..05fc39690d532 100644 --- a/llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpload-vpstore-output.ll @@ -17,7 +17,7 @@ define @strided_vpload_nxv1i8_i8(ptr %ptr, i8 signext %stride, ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x11 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: [[PseudoVLSE8_V_MF8_MASK:%[0-9]+]]:vrnov0 = PseudoVLSE8_V_MF8_MASK $noreg, [[COPY3]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 1 /* ta, mu */ :: (load unknown-size, align 1) + ; CHECK-NEXT: [[PseudoVLSE8_V_MF8_MASK:%[0-9]+]]:vrnov0 = PseudoVLSE8_V_MF8_MASK $noreg, [[COPY3]], [[COPY2]], $v0, [[COPY]], 1 /* ta, mu */ :: (load unknown-size, align 1) ; CHECK-NEXT: $v8 = COPY [[PseudoVLSE8_V_MF8_MASK]] ; CHECK-NEXT: PseudoRET implicit $v8 %load = call @llvm.experimental.vp.strided.load.nxv1i8.p0.i8(ptr %ptr, i8 %stride, %m, i32 %evl) @@ -37,7 +37,7 @@ define void @strided_vpstore_nxv1i8_i8( %val, ptr %ptr, i8 sign ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vr = COPY $v8 ; CHECK-NEXT: $v0 = COPY [[COPY1]] - ; CHECK-NEXT: PseudoVSSE8_V_MF8_MASK [[COPY4]], [[COPY3]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */ :: (store unknown-size, align 1) + ; CHECK-NEXT: PseudoVSSE8_V_MF8_MASK [[COPY4]], [[COPY3]], [[COPY2]], $v0, [[COPY]] :: (store unknown-size, align 1) ; CHECK-NEXT: PseudoRET call void @llvm.experimental.vp.strided.store.nxv1i8.p0.i8( %val, ptr %ptr, i8 %stride, %m, i32 %evl) ret void diff --git a/llvm/test/CodeGen/RISCV/rvv/subregister-undef-early-clobber.mir b/llvm/test/CodeGen/RISCV/rvv/subregister-undef-early-clobber.mir index 9cafb323dc65c..9dd3f02e9e573 100644 --- a/llvm/test/CodeGen/RISCV/rvv/subregister-undef-early-clobber.mir +++ b/llvm/test/CodeGen/RISCV/rvv/subregister-undef-early-clobber.mir @@ -10,7 +10,7 @@ body: | ; CHECK: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF - 
; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_0 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm4 = IMPLICIT_DEF @@ -20,20 +20,20 @@ body: | ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_1 ; CHECK-NEXT: early-clobber %6:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm4 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vr = IMPLICIT_DEF - %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0 + %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0 %6:vrm4 = INSERT_SUBREG %1:vrm4, %5, %subreg.sub_vrm1_0 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm4 = IMPLICIT_DEF early-clobber %0:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed %6, 0, 0, 5/* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M4 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M4 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -48,7 +48,7 @@ body: | ; CHECK: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_1 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm4 = IMPLICIT_DEF @@ -58,20 +58,20 @@ body: | ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_0 ; CHECK-NEXT: early-clobber %6:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm4 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vr = IMPLICIT_DEF - %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0 + %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0 %6:vrm4 = INSERT_SUBREG %1:vrm4, %5, %subreg.sub_vrm1_1 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm4 = IMPLICIT_DEF 
early-clobber %0:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M4 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M4 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -86,7 +86,7 @@ body: | ; CHECK: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_2 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm4 = IMPLICIT_DEF @@ -96,20 +96,20 @@ body: | ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_3 ; CHECK-NEXT: early-clobber %6:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm4 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vr = IMPLICIT_DEF - %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0 + %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0 %6:vrm4 = INSERT_SUBREG %1:vrm4, %5, %subreg.sub_vrm1_2 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm4 = IMPLICIT_DEF early-clobber %0:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M4 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M4 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -124,7 +124,7 @@ body: | ; CHECK: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_3 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm4 = IMPLICIT_DEF @@ -134,20 +134,20 @@ body: | ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_2 ; CHECK-NEXT: early-clobber %6:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed 
[[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm4 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vr = IMPLICIT_DEF - %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0 + %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0 %6:vrm4 = INSERT_SUBREG %1:vrm4, %5, %subreg.sub_vrm1_3 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm4 = IMPLICIT_DEF early-clobber %0:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M4 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M4 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -162,7 +162,7 @@ body: | ; CHECK: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_0 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm4 = IMPLICIT_DEF @@ -170,20 +170,20 @@ body: | ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_1 ; CHECK-NEXT: early-clobber %6:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed [[INSERT_SUBREG1]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm4 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vrm2 = IMPLICIT_DEF - %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 5, 0 + %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 0 %6:vrm4 = INSERT_SUBREG %1:vrm4, %5, %subreg.sub_vrm2_0 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm4 = IMPLICIT_DEF early-clobber %0:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M4 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M4 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -198,7 +198,7 @@ body: | ; CHECK: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm4 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_1 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm4 = 
IMPLICIT_DEF @@ -206,20 +206,20 @@ body: | ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm4 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_0 ; CHECK-NEXT: early-clobber %6:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed [[INSERT_SUBREG1]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M4 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm4 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vrm2 = IMPLICIT_DEF - %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 5, 0 + %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 0 %6:vrm4 = INSERT_SUBREG %1:vrm4, %5, %subreg.sub_vrm2_1 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm4 = IMPLICIT_DEF early-clobber %0:vrm4 = PseudoVRGATHER_VI_M4 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M4 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M4 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -235,7 +235,7 @@ body: | ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */ + ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_0 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF @@ -247,20 +247,20 @@ body: | ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_1 ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0 - ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: $x10 = COPY [[COPY]] ; CHECK-NEXT: PseudoRET implicit $x10 %1:vrm8 = IMPLICIT_DEF %7:gpr = ADDI $x0, 8 %pt:vr = IMPLICIT_DEF - %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0 + %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0 %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_0 dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype %pt2:vrm8 = IMPLICIT_DEF early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype %2:gpr = ADDI $x0, 0 - PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype + PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype %3:gpr = COPY $x0 $x10 = COPY %3 PseudoRET implicit $x10 @@ -275,7 +275,7 @@ body: | ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI 
$x0, 8
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_1
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -287,20 +287,20 @@ body: |
     ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_0
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vr = IMPLICIT_DEF
-    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0
+    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_1
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -315,7 +315,7 @@ body: |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_2
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -327,20 +327,20 @@ body: |
     ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_3
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vr = IMPLICIT_DEF
-    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0
+    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_2
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -355,7 +355,7 @@ body: |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_3
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -367,20 +367,20 @@ body: |
     ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_2
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vr = IMPLICIT_DEF
-    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0
+    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_3
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -395,7 +395,7 @@ body: |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_4
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -407,20 +407,20 @@ body: |
     ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_5
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vr = IMPLICIT_DEF
-    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0
+    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_4
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -435,7 +435,7 @@ body: |
    ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_5
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -447,20 +447,20 @@ body: |
     ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_4
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vr = IMPLICIT_DEF
-    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0
+    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_5
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -475,7 +475,7 @@ body: |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_6
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -487,20 +487,20 @@ body: |
     ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_7
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vr = IMPLICIT_DEF
-    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0
+    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_6
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -515,7 +515,7 @@ body: |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M1_]], %subreg.sub_vrm1_7
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -527,20 +527,20 @@ body: |
     ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG2]], [[PseudoRVVInitUndefM1_]], %subreg.sub_vrm1_6
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG3]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vr = IMPLICIT_DEF
-    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 5, 0
+    %5:vr = PseudoVLE32_V_M1 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm1_7
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -555,7 +555,7 @@ body: |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_0
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -565,20 +565,20 @@ body: |
     ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_1
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vrm2 = IMPLICIT_DEF
-    %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 5, 0
+    %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm2_0
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -593,7 +593,7 @@ body: |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_1
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -603,20 +603,20 @@ body: |
     ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_0
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vrm2 = IMPLICIT_DEF
-    %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 5, 0
+    %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm2_1
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -631,7 +631,7 @@ body: |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_2
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -641,20 +641,20 @@ body: |
     ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_3
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vrm2 = IMPLICIT_DEF
-    %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 5, 0
+    %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm2_2
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -669,7 +669,7 @@ body: |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M2_]], %subreg.sub_vrm2_3
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -679,20 +679,20 @@ body: |
     ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG1]], [[PseudoRVVInitUndefM2_]], %subreg.sub_vrm2_2
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG2]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vrm2 = IMPLICIT_DEF
-    %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 5, 0
+    %5:vrm2 = PseudoVLE32_V_M2 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm2_3
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -707,7 +707,7 @@ body: |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vrm4 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32_V_M4 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32_V_M4 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M4_]], %subreg.sub_vrm4_0
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -715,20 +715,20 @@ body: |
     ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM4_]], %subreg.sub_vrm4_1
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG1]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vrm4 = IMPLICIT_DEF
-    %5:vrm4 = PseudoVLE32_V_M4 %pt, killed %7:gpr, 0, 5, 0
+    %5:vrm4 = PseudoVLE32_V_M4 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm4_0
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
@@ -743,7 +743,7 @@ body: |
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
     ; CHECK-NEXT: %pt:vrm4 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[PseudoVLE32_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32_V_M4 %pt, killed [[ADDI]], 0, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: [[PseudoVLE32_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32_V_M4 %pt, killed [[ADDI]], 0, 0 /* tu, mu */
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vrm8 = INSERT_SUBREG [[DEF]], [[PseudoVLE32_V_M4_]], %subreg.sub_vrm4_1
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: %pt2:vrm8 = IMPLICIT_DEF
@@ -751,20 +751,20 @@ body: |
     ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:vrm8 = INSERT_SUBREG [[INSERT_SUBREG]], [[PseudoRVVInitUndefM4_]], %subreg.sub_vrm4_0
     ; CHECK-NEXT: early-clobber %6:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed [[INSERT_SUBREG1]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI $x0, 0
-    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    ; CHECK-NEXT: PseudoVSE32_V_M8 killed %6, killed [[ADDI1]], 0, implicit $vl, implicit $vtype
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: $x10 = COPY [[COPY]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:vrm8 = IMPLICIT_DEF
     %7:gpr = ADDI $x0, 8
     %pt:vrm4 = IMPLICIT_DEF
-    %5:vrm4 = PseudoVLE32_V_M4 %pt, killed %7:gpr, 0, 5, 0
+    %5:vrm4 = PseudoVLE32_V_M4 %pt, killed %7:gpr, 0, 0
     %6:vrm8 = INSERT_SUBREG %1:vrm8, %5, %subreg.sub_vrm4_1
     dead $x0 = PseudoVSETIVLI 0, 210 /* e32, m4, ta, ma */, implicit-def $vl, implicit-def $vtype
     %pt2:vrm8 = IMPLICIT_DEF
     early-clobber %0:vrm8 = PseudoVRGATHER_VI_M8 %pt2, killed %6, 0, 0, 5 /* e32 */, 0, implicit $vl, implicit $vtype
     %2:gpr = ADDI $x0, 0
-    PseudoVSE32_V_M8 killed %0, killed %2, 0, 5 /* e32 */, implicit $vl, implicit $vtype
+    PseudoVSE32_V_M8 killed %0, killed %2, 0, implicit $vl, implicit $vtype
     %3:gpr = COPY $x0
     $x10 = COPY %3
     PseudoRET implicit $x10
diff --git a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
index 89b756818e7f5..83135734e0906 100644
--- a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
@@ -7,7 +7,6 @@
 # set.
 --- |
-  ; ModuleID = 'test.ll'
   source_filename = "test.ll"
   target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
   target triple = "riscv64"
@@ -53,7 +52,7 @@ body: |
     ; CHECK-NEXT: $v0 = COPY [[COPY]]
    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8nov0 = COPY [[DEF]]
-    ; CHECK-NEXT: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, -1, 6 /* e64 */, 1 /* ta, mu */ :: (load (s512) from %ir.a, align 8)
+    ; CHECK-NEXT: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, -1, 1 /* ta, mu */ :: (load (s512) from %ir.a, align 8)
     ; CHECK-NEXT: $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]]
     ; CHECK-NEXT: PseudoRET implicit $v8m8
     %1:vr = COPY $v0
@@ -61,7 +60,7 @@ body: |
     $v0 = COPY %1
     %3:vrm8 = IMPLICIT_DEF
     %4:vrm8nov0 = COPY %3
-    %2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, -1, 6, 1 :: (load (s512) from %ir.a, align 8)
+    %2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, -1, 1 :: (load (s512) from %ir.a, align 8)
     $v8m8 = COPY %2
     PseudoRET implicit $v8m8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
index 15cb42bacf173..59410e6a5251c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
@@ -14,7 +14,7 @@ define i64 @test_vleff_nxv8i8(ptr %p, i64 %vl) {
 ; CHECK-NEXT: {{ $}}
 ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
-; CHECK-NEXT: [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 $noreg, [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
+; CHECK-NEXT: [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 $noreg, [[COPY1]], [[COPY]], 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
 ; CHECK-NEXT: $x10 = COPY [[PseudoVLE8FF_V_M1_1]]
 ; CHECK-NEXT: PseudoRET implicit $x10
 entry:
@@ -31,7 +31,7 @@ define i64 @test_vleff_nxv8i8_tu( %merge, ptr %p, i64 %vl) {
 ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
-; CHECK-NEXT: [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
+; CHECK-NEXT: [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 [[COPY2]], [[COPY1]], [[COPY]], 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
 ; CHECK-NEXT: $x10 = COPY [[PseudoVLE8FF_V_M1_1]]
 ; CHECK-NEXT: PseudoRET implicit $x10
 entry:
@@ -50,7 +50,7 @@ define i64 @test_vleff_nxv8i8_mask( %maskedoff, ptr %p, %val, ptr %base, i64 %vl,
 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
 ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1 = REG_SEQUENCE [[COPY2]], %subreg.sub_vrm1_0, [[COPY2]], %subreg.sub_vrm1_1
-; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[REG_SEQUENCE]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
+; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[REG_SEQUENCE]], [[COPY1]], [[COPY]], 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
 ; CHECK-NEXT: $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]]
 ; CHECK-NEXT: PseudoRET implicit $x10
 entry:
@@ -109,7 +109,7 @@ define i64 @test_vlseg2ff_nxv8i8_mask( %val, ptr %base,