diff --git a/llvm/include/llvm/CodeGen/MachineInstr.h b/llvm/include/llvm/CodeGen/MachineInstr.h index 4877f43e8578d..bd72ac23fc9c0 100644 --- a/llvm/include/llvm/CodeGen/MachineInstr.h +++ b/llvm/include/llvm/CodeGen/MachineInstr.h @@ -1364,6 +1364,10 @@ class MachineInstr return getOpcode() == TargetOpcode::INLINEASM || getOpcode() == TargetOpcode::INLINEASM_BR; } + /// Returns true if the register operand can be folded with a load or store + /// into a frame index. Does so by checking the InlineAsm::Flag immediate + /// operand at OpId - 1. + bool mayFoldInlineAsmRegOp(unsigned OpId) const; bool isStackAligningInlineAsm() const; InlineAsm::AsmDialect getInlineAsmDialect() const; diff --git a/llvm/include/llvm/IR/InlineAsm.h b/llvm/include/llvm/IR/InlineAsm.h index 969ad42816a7e..e5f506e5694da 100644 --- a/llvm/include/llvm/IR/InlineAsm.h +++ b/llvm/include/llvm/IR/InlineAsm.h @@ -291,18 +291,23 @@ class InlineAsm final : public Value { // Bits 30-16 - A ConstraintCode:: value indicating the original // constraint code. (MemConstraintCode) // Else: - // Bits 30-16 - The register class ID to use for the operand. (RegClass) + // Bits 29-16 - The register class ID to use for the operand. (RegClass) + // Bit 30 - If the register is permitted to be spilled. + // (RegMayBeFolded) + // Defaults to false (e.g. for a register-only constraint + // like "r"); may be set for constraints like "rm" (or "g"). + // - // As such, MatchedOperandNo, MemConstraintCode, and RegClass are views of - // the same slice of bits, but are mutually exclusive depending on the - // fields IsMatched then KindField. + // As such, MatchedOperandNo, MemConstraintCode, and + // (RegClass+RegMayBeFolded) are views of the same slice of bits, but are + // mutually exclusive depending on the fields IsMatched then KindField. 
class Flag { uint32_t Storage; using KindField = Bitfield::Element<Kind, 0, 3, Kind::Func>; using NumOperands = Bitfield::Element<unsigned, 3, 13>; using MatchedOperandNo = Bitfield::Element<unsigned, 16, 15>; using MemConstraintCode = Bitfield::Element<ConstraintCode, 16, 15, ConstraintCode::Max>; - using RegClass = Bitfield::Element<unsigned, 16, 15>; + using RegClass = Bitfield::Element<unsigned, 16, 14>; + using RegMayBeFolded = Bitfield::Element<bool, 30, 1>; using IsMatched = Bitfield::Element<bool, 31, 1>; @@ -413,6 +418,26 @@ class InlineAsm final : public Value { "Flag is not a memory or function constraint!"); Bitfield::set<MemConstraintCode>(Storage, ConstraintCode::Unknown); } + + /// Set a bit to denote that while this operand is some kind of register + /// (use, def, ...), a memory flag did appear in the original constraint + /// list. This is set by the instruction selection framework, and consumed + /// by the register allocator. While the register allocator is generally + /// responsible for spilling registers, we need to be able to distinguish + /// between registers that the register allocator has permission to fold + /// ("rm") vs ones it does not ("r"). This is because the inline asm may use + /// instructions which don't support memory addressing modes for that + /// operand. 
+ void setRegMayBeFolded(bool B) { + assert((isRegDefKind() || isRegDefEarlyClobberKind() || isRegUseKind()) && + "Must be reg"); + Bitfield::set<RegMayBeFolded>(Storage, B); + } + bool getRegMayBeFolded() const { + assert((isRegDefKind() || isRegDefEarlyClobberKind() || isRegUseKind()) && + "Must be reg"); + return Bitfield::get<RegMayBeFolded>(Storage); + } }; static std::vector<StringRef> getExtraInfoNames(unsigned ExtraInfo) { diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp index 048563cc2bcc4..9e7b4df2576fe 100644 --- a/llvm/lib/CodeGen/MachineInstr.cpp +++ b/llvm/lib/CodeGen/MachineInstr.cpp @@ -1792,6 +1792,12 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST, if (F.isUseOperandTiedToDef(TiedTo)) OS << " tiedto:$" << TiedTo; + if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() || + F.isRegUseKind()) && + F.getRegMayBeFolded()) { + OS << " foldable"; + } + OS << ']'; // Compute the index of the next operand descriptor. @@ -2526,3 +2532,20 @@ void MachineInstr::insert(mop_iterator InsertBefore, tieOperands(Tie1, Tie2); } } + +bool MachineInstr::mayFoldInlineAsmRegOp(unsigned OpId) const { + assert(OpId && "expected non-zero operand id"); + assert(isInlineAsm() && "should only be used on inline asm"); + + if (!getOperand(OpId).isReg()) + return false; + + const MachineOperand &MD = getOperand(OpId - 1); + if (!MD.isImm()) + return false; + + InlineAsm::Flag F(MD.getImm()); + if (F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind()) + return F.getRegMayBeFolded(); + return false; +} diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp index fe7efb73a2dce..3013a768bc4d5 100644 --- a/llvm/lib/CodeGen/TargetInstrInfo.cpp +++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp @@ -1639,6 +1639,10 @@ std::string TargetInstrInfo::createMIROperandComment( if (F.isUseOperandTiedToDef(TiedTo)) OS << " tiedto:$" << TiedTo; + if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isRegUseKind()) && + 
F.getRegMayBeFolded()) + OS << " foldable"; + return OS.str(); }