30 changes: 15 additions & 15 deletions llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2982,21 +2982,21 @@ multiclass VPseudoVFWALU_WV_WF_RM {
multiclass VPseudoVMRG_VM_XM_IM {
  foreach m = MxList in {
    defvar mx = m.MX;
-    def "_VVM" # "_" # m.MX:
-        VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
-                                 m.vrclass, m.vrclass, m>,
-        SchedBinary<"WriteVIMergeV", "ReadVIMergeV", "ReadVIMergeV", mx,
-                    forcePassthruRead=true>;
-    def "_VXM" # "_" # m.MX:
-        VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
-                                 m.vrclass, GPR, m>,
-        SchedBinary<"WriteVIMergeX", "ReadVIMergeV", "ReadVIMergeX", mx,
-                    forcePassthruRead=true>;
-    def "_VIM" # "_" # m.MX:
-        VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
-                                 m.vrclass, simm5, m>,
-        SchedUnary<"WriteVIMergeI", "ReadVIMergeV", mx,
-                   forcePassthruRead=true>;
+    def "_VVM"#"_"#m.MX : VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
+                                                   GetVRegNoV0<m.vrclass>.R,
+                                                   GetVRegNoV0<m.vrclass>.R, m>,
+                          SchedBinary<"WriteVIMergeV", "ReadVIMergeV", "ReadVIMergeV", mx,
+                                      forcePassthruRead = true>;
+    def "_VXM"#"_"#m.MX
+        : VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
+                                   GetVRegNoV0<m.vrclass>.R, GPR, m>,
+          SchedBinary<"WriteVIMergeX", "ReadVIMergeV", "ReadVIMergeX", mx,
+                      forcePassthruRead = true>;
+    def "_VIM"#"_"#m.MX
+        : VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
+                                   GetVRegNoV0<m.vrclass>.R, simm5, m>,
+          SchedUnary<"WriteVIMergeI", "ReadVIMergeV", mx,
+                     forcePassthruRead = true>;
  }
}

53 changes: 25 additions & 28 deletions llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
@@ -73,7 +73,7 @@ class RISCVVectorPeephole : public MachineFunctionPass {
  bool isAllOnesMask(const MachineInstr *MaskDef) const;
  std::optional<unsigned> getConstant(const MachineOperand &VL) const;
  bool ensureDominates(const MachineOperand &Use, MachineInstr &Src) const;
-  bool isKnownSameDefs(Register A, Register B) const;
+  Register lookThruCopies(Register Reg) const;
};

} // namespace
@@ -387,23 +387,18 @@ bool RISCVVectorPeephole::convertAllOnesVMergeToVMv(MachineInstr &MI) const {
  return true;
}

-bool RISCVVectorPeephole::isKnownSameDefs(Register A, Register B) const {
-  if (A.isPhysical() || B.isPhysical())
-    return false;
-
-  auto LookThruVirtRegCopies = [this](Register Reg) {
-    while (MachineInstr *Def = MRI->getUniqueVRegDef(Reg)) {
-      if (!Def->isFullCopy())
-        break;
-      Register Src = Def->getOperand(1).getReg();
-      if (!Src.isVirtual())
-        break;
-      Reg = Src;
-    }
-    return Reg;
-  };
-
-  return LookThruVirtRegCopies(A) == LookThruVirtRegCopies(B);
-}
+// If \p Reg is defined by one or more COPYs of virtual registers, traverses
+// the chain and returns the root non-COPY source.
+Register RISCVVectorPeephole::lookThruCopies(Register Reg) const {
+  while (MachineInstr *Def = MRI->getUniqueVRegDef(Reg)) {
+    if (!Def->isFullCopy())
+      break;
+    Register Src = Def->getOperand(1).getReg();
+    if (!Src.isVirtual())
+      break;
+    Reg = Src;
+  }
+  return Reg;
+}

/// If a PseudoVMERGE_VVM's true operand is a masked pseudo and both have the
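Reviewer note: the virtuality check that isKnownSameDefs used to do up front now lives at each call site. A minimal sketch of the resulting pattern (the helper name below is hypothetical, not code from this patch):

```cpp
#include "llvm/CodeGen/Register.h"
using llvm::Register;

// Sketch: compare two operands after stripping chains of full COPYs of
// virtual registers via lookThruCopies(). A physical root proves nothing,
// since a physical register may be redefined between the two uses, so the
// callers bail out in that case.
static bool rootsAreSameDef(Register RootA, Register RootB) {
  return RootA.isVirtual() && RootA == RootB;
}

// Call sites in this patch then effectively read:
//   if (!rootsAreSameDef(lookThruCopies(A), lookThruCopies(B)))
//     return false;
```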
@@ -428,10 +423,11 @@ bool RISCVVectorPeephole::convertSameMaskVMergeToVMv(MachineInstr &MI) {
  if (!TrueMaskedInfo || !hasSameEEW(MI, *True))
    return false;

-  const MachineOperand &TrueMask =
-      True->getOperand(TrueMaskedInfo->MaskOpIdx + True->getNumExplicitDefs());
-  const MachineOperand &MIMask = MI.getOperand(4);
-  if (!isKnownSameDefs(TrueMask.getReg(), MIMask.getReg()))
+  Register TrueMaskReg = lookThruCopies(
+      True->getOperand(TrueMaskedInfo->MaskOpIdx + True->getNumExplicitDefs())
+          .getReg());
+  Register MIMaskReg = lookThruCopies(MI.getOperand(4).getReg());
+  if (!TrueMaskReg.isVirtual() || TrueMaskReg != MIMaskReg)
    return false;

  // Masked off lanes past TrueVL will come from False, and converting to vmv
@@ -717,9 +713,9 @@ bool RISCVVectorPeephole::foldVMergeToMask(MachineInstr &MI) const {
  if (RISCV::getRVVMCOpcode(MI.getOpcode()) != RISCV::VMERGE_VVM)
    return false;

-  Register PassthruReg = MI.getOperand(1).getReg();
-  Register FalseReg = MI.getOperand(2).getReg();
-  Register TrueReg = MI.getOperand(3).getReg();
+  Register PassthruReg = lookThruCopies(MI.getOperand(1).getReg());
+  Register FalseReg = lookThruCopies(MI.getOperand(2).getReg());
+  Register TrueReg = lookThruCopies(MI.getOperand(3).getReg());
  if (!TrueReg.isVirtual() || !MRI->hasOneUse(TrueReg))
    return false;
  MachineInstr &True = *MRI->getUniqueVRegDef(TrueReg);
@@ -740,16 +736,17 @@ bool RISCVVectorPeephole::foldVMergeToMask(MachineInstr &MI) const {

  // We require that either passthru and false are the same, or that passthru
  // is undefined.
-  if (PassthruReg && !isKnownSameDefs(PassthruReg, FalseReg))
+  if (PassthruReg && !(PassthruReg.isVirtual() && PassthruReg == FalseReg))
    return false;

  std::optional<std::pair<unsigned, unsigned>> NeedsCommute;

  // If True has a passthru operand then it needs to be the same as vmerge's
  // False, since False will be used for the result's passthru operand.
-  Register TruePassthru = True.getOperand(True.getNumExplicitDefs()).getReg();
+  Register TruePassthru =
+      lookThruCopies(True.getOperand(True.getNumExplicitDefs()).getReg());
  if (RISCVII::isFirstDefTiedToFirstUse(True.getDesc()) && TruePassthru &&
-      !isKnownSameDefs(TruePassthru, FalseReg)) {
+      !(TruePassthru.isVirtual() && TruePassthru == FalseReg)) {
    // If True's passthru != False, check if it uses False in another operand
    // and try to commute it.
    int OtherIdx = True.findRegisterUseOperandIdx(FalseReg, TRI);
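For review context, the passthru rule the last two hunks enforce, condensed into a sketch (hypothetical helper name; a null Register, i.e. an absent passthru, converts to false):

```cpp
#include "llvm/CodeGen/Register.h"
using llvm::Register;

// Sketch of the guard in foldVMergeToMask() after this patch: the fold is
// legal when vmerge's passthru is undef, or when its root register is the
// same virtual register as False's root, because False becomes the passthru
// of the resulting masked pseudo.
static bool passthruAllowsFold(Register PassthruRoot, Register FalseRoot) {
  if (!PassthruRoot) // undef passthru: merged-off lanes may hold anything
    return true;
  return PassthruRoot.isVirtual() && PassthruRoot == FalseRoot;
}
```

The same condition guards True's own passthru; when it fails, the code falls back to searching for False among True's other use operands and commuting, as the findRegisterUseOperandIdx call above begins to do (the rest of that logic is in the collapsed lines).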
(MIR test file for the vmerge folds: file header not captured in this view)
@@ -11,15 +11,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv1i8
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 3 /* e8 */
; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: select_nxv1i8
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 3 /* e8 */
; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
@@ -40,15 +40,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv4i8
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 3 /* e8 */
; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: select_nxv4i8
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 3 /* e8 */
; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
@@ -69,15 +69,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv16i8
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 3 /* e8 */
; RV32I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
; RV32I-NEXT: PseudoRET implicit $v8m4
;
; RV64I-LABEL: name: select_nxv16i8
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 3 /* e8 */
; RV64I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
@@ -98,15 +98,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv64i8
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 4 /* e16 */
; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: select_nxv64i8
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 4 /* e16 */
; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
@@ -127,15 +127,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv2i16
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 4 /* e16 */
; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: select_nxv2i16
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 4 /* e16 */
; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
@@ -156,15 +156,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv8i16
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 4 /* e16 */
; RV32I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
; RV32I-NEXT: PseudoRET implicit $v8m4
;
; RV64I-LABEL: name: select_nxv8i16
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 4 /* e16 */
; RV64I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
@@ -185,15 +185,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv32i16
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_MF2_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF2 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 5 /* e32 */
; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF2_]]
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: select_nxv32i16
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_MF2_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF2 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 5 /* e32 */
; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF2_]]
@@ -214,15 +214,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv2i32
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 5 /* e32 */
; RV32I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
; RV32I-NEXT: PseudoRET implicit $v8m2
;
; RV64I-LABEL: name: select_nxv2i32
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 5 /* e32 */
; RV64I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
@@ -243,15 +243,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv8i32
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 5 /* e32 */
; RV32I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
; RV32I-NEXT: PseudoRET implicit $v8m8
;
; RV64I-LABEL: name: select_nxv8i32
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 5 /* e32 */
; RV64I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
@@ -272,15 +272,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv1i64
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 6 /* e64 */
; RV32I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
; RV32I-NEXT: PseudoRET implicit $v8m2
;
; RV64I-LABEL: name: select_nxv1i64
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 6 /* e64 */
; RV64I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
@@ -301,15 +301,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv4i64
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 6 /* e64 */
; RV32I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
; RV32I-NEXT: PseudoRET implicit $v8m8
;
; RV64I-LABEL: name: select_nxv4i64
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
-; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 6 /* e64 */
; RV64I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll
@@ -311,10 +311,10 @@ define i32 @test_nxv128i1(<vscale x 128 x i1> %x) {
; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v6, a0
-; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v6, v7, a1
+; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v7, a0
; CHECK-NEXT: vslidedown.vx v5, v6, a0
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/RISCV/rvv/copyprop.mir
@@ -43,7 +43,7 @@ body: |
%2:gpr = COPY $x11
%1:gpr = COPY $x10
%3:vr = COPY $v8
-%17:vr = PseudoVSLL_VI_M1 undef $noreg, %3, 5, 1, 6 /* e64 */, 0
+%17:vrnov0 = PseudoVSLL_VI_M1 undef $noreg, %3, 5, 1, 6 /* e64 */, 0
%22:vr = PseudoVMSNE_VI_M1 %3, 0, 1, 6 /* e64 */
%23:vmv0 = COPY %22
%25:vrnov0 = PseudoVMERGE_VIM_M1 undef $noreg, %17, -1, %23, 1, 6 /* e64 */
Expand Down