diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index c76a330b08aa8..6c5712dc795bc 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -2193,11 +2193,51 @@ MachineInstr *RISCVInstrInfo::emitLdStWithAddr(MachineInstr &MemI,
       .setMIFlags(MemI.getFlags());
 }
 
+// Target hook used by machine schedulers/cluster passes: report the base
+// operand(s), byte offset and access width of a memory instruction.
+// Delegates to getMemOperandWithOffsetWidth for the single-base-operand case.
+bool RISCVInstrInfo::getMemOperandsWithOffsetWidth(
+    const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
+    int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
+    const TargetRegisterInfo *TRI) const {
+  if (!LdSt.mayLoadOrStore())
+    return false;
+
+  // Conservatively, only handle scalar loads/stores for now.
+  switch (LdSt.getOpcode()) {
+  case RISCV::LB:
+  case RISCV::LBU:
+  case RISCV::SB:
+  case RISCV::LH:
+  case RISCV::LHU:
+  case RISCV::FLH:
+  case RISCV::SH:
+  case RISCV::FSH:
+  case RISCV::LW:
+  case RISCV::LWU:
+  case RISCV::FLW:
+  case RISCV::SW:
+  case RISCV::FSW:
+  case RISCV::LD:
+  case RISCV::FLD:
+  case RISCV::SD:
+  case RISCV::FSD:
+    break;
+  default:
+    return false;
+  }
+  const MachineOperand *BaseOp;
+  // Scalar accesses always have a fixed (non-vscale) offset.
+  OffsetIsScalable = false;
+  if (!getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI))
+    return false;
+  BaseOps.push_back(BaseOp);
+  return true;
+}
+
 // Set BaseReg (the base register operand), Offset (the byte offset being
 // accessed) and the access Width of the passed instruction that reads/writes
 // memory. Returns false if the instruction does not read/write memory or the
 // BaseReg/Offset/Width can't be determined. Is not guaranteed to always
 // recognise base operands and offsets in all cases.
+// TODO: Add an IsScalable bool ref argument (like the equivalent AArch64
+// function) and set it as appropriate.
 bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
     const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
     unsigned &Width, const TargetRegisterInfo *TRI) const {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index 0eac8d1e1b1af..8f860077c3031 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -152,6 +152,11 @@ class RISCVInstrInfo : public RISCVGenInstrInfo {
   MachineInstr *emitLdStWithAddr(MachineInstr &MemI,
                                  const ExtAddrMode &AM) const override;
 
+  bool getMemOperandsWithOffsetWidth(
+      const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
+      int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
+      const TargetRegisterInfo *TRI) const override;
+
   bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,
                                     const MachineOperand *&BaseOp,
                                     int64_t &Offset, unsigned &Width,
diff --git a/llvm/unittests/Target/RISCV/RISCVInstrInfoTest.cpp b/llvm/unittests/Target/RISCV/RISCVInstrInfoTest.cpp
index fd5f17e0185e4..43e98d703d7a3 100644
--- a/llvm/unittests/Target/RISCV/RISCVInstrInfoTest.cpp
+++ b/llvm/unittests/Target/RISCV/RISCVInstrInfoTest.cpp
@@ -92,6 +92,70 @@ TEST_P(RISCVInstrInfoTest, IsAddImmediate) {
   }
 }
 
+TEST_P(RISCVInstrInfoTest, GetMemOperandsWithOffsetWidth) {
+  const RISCVInstrInfo *TII = ST->getInstrInfo();
+  const TargetRegisterInfo *TRI = ST->getRegisterInfo();
+  DebugLoc DL;
+
+  SmallVector<const MachineOperand *> BaseOps;
+  unsigned Width;
+  int64_t Offset;
+  bool OffsetIsScalable;
+
+  // Scalar integer load: base X2, offset -128, width 1.
+  auto MMO = MF->getMachineMemOperand(MachinePointerInfo(),
+                                      MachineMemOperand::MOLoad, 1, Align(1));
+  MachineInstr *MI = BuildMI(*MF, DL, TII->get(RISCV::LB), RISCV::X1)
+                         .addReg(RISCV::X2)
+                         .addImm(-128)
+                         .addMemOperand(MMO)
+                         .getInstr();
+  bool Res = TII->getMemOperandsWithOffsetWidth(*MI, BaseOps, Offset,
+                                                OffsetIsScalable, Width, TRI);
+  ASSERT_TRUE(Res);
+  ASSERT_EQ(BaseOps.size(), 1u);
+  ASSERT_TRUE(BaseOps.front()->isReg());
+  EXPECT_EQ(BaseOps.front()->getReg(), RISCV::X2);
+  EXPECT_EQ(Offset, -128);
+  EXPECT_FALSE(OffsetIsScalable);
+  EXPECT_EQ(Width, 1u);
+
+  // Scalar FP store: base X3, offset 36, width 4.
+  BaseOps.clear();
+  MMO = MF->getMachineMemOperand(MachinePointerInfo(),
+                                 MachineMemOperand::MOStore, 4, Align(4));
+  MI = BuildMI(*MF, DL, TII->get(RISCV::FSW))
+           .addReg(RISCV::F3_F)
+           .addReg(RISCV::X3)
+           .addImm(36)
+           .addMemOperand(MMO);
+  Res = TII->getMemOperandsWithOffsetWidth(*MI, BaseOps, Offset,
+                                           OffsetIsScalable, Width, TRI);
+  ASSERT_TRUE(Res);
+  ASSERT_EQ(BaseOps.size(), 1u);
+  ASSERT_TRUE(BaseOps.front()->isReg());
+  EXPECT_EQ(BaseOps.front()->getReg(), RISCV::X3);
+  EXPECT_EQ(Offset, 36);
+  EXPECT_FALSE(OffsetIsScalable);
+  EXPECT_EQ(Width, 4u);
+
+  BaseOps.clear();
+  MMO = MF->getMachineMemOperand(MachinePointerInfo(),
+                                 MachineMemOperand::MOStore, 16, Align(16));
+  MI = BuildMI(*MF, DL, TII->get(RISCV::PseudoVLE32_V_M1), RISCV::V8)
+           .addReg(RISCV::X3)
+           .addMemOperand(MMO);
+  Res = TII->getMemOperandsWithOffsetWidth(*MI, BaseOps, Offset,
+                                           OffsetIsScalable, Width, TRI);
+  ASSERT_FALSE(Res); // Vector loads/stores are not handled for now.
+
+  // A plain ADDI is not a memory access and must be rejected.
+  BaseOps.clear();
+  MI = BuildMI(*MF, DL, TII->get(RISCV::ADDI), RISCV::X4)
+           .addReg(RISCV::X5)
+           .addImm(16);
+  Res = TII->getMemOperandsWithOffsetWidth(*MI, BaseOps, Offset,
+                                           OffsetIsScalable, Width, TRI);
+  EXPECT_FALSE(Res);
+}
+
 } // namespace
 
 INSTANTIATE_TEST_SUITE_P(RV32And64, RISCVInstrInfoTest,