diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h
index 6074d999f97fff..06119f32aedc6f 100644
--- a/llvm/include/llvm/IR/InstrTypes.h
+++ b/llvm/include/llvm/IR/InstrTypes.h
@@ -722,41 +722,43 @@ class CmpInst : public Instruction {
   /// Some passes (e.g. InstCombine) depend on the bit-wise characteristics of
   /// FCMP_* values. Changing the bit patterns requires a potential change to
   /// those passes.
-  enum Predicate {
-    // Opcode              U L G E    Intuitive operation
-    FCMP_FALSE = 0, ///< 0 0 0 0    Always false (always folded)
-    FCMP_OEQ = 1,   ///< 0 0 0 1    True if ordered and equal
-    FCMP_OGT = 2,   ///< 0 0 1 0    True if ordered and greater than
-    FCMP_OGE = 3,   ///< 0 0 1 1    True if ordered and greater than or equal
-    FCMP_OLT = 4,   ///< 0 1 0 0    True if ordered and less than
-    FCMP_OLE = 5,   ///< 0 1 0 1    True if ordered and less than or equal
-    FCMP_ONE = 6,   ///< 0 1 1 0    True if ordered and operands are unequal
-    FCMP_ORD = 7,   ///< 0 1 1 1    True if ordered (no nans)
-    FCMP_UNO = 8,   ///< 1 0 0 0    True if unordered: isnan(X) | isnan(Y)
-    FCMP_UEQ = 9,   ///< 1 0 0 1    True if unordered or equal
-    FCMP_UGT = 10,  ///< 1 0 1 0    True if unordered or greater than
-    FCMP_UGE = 11,  ///< 1 0 1 1    True if unordered, greater than, or equal
-    FCMP_ULT = 12,  ///< 1 1 0 0    True if unordered or less than
-    FCMP_ULE = 13,  ///< 1 1 0 1    True if unordered, less than, or equal
-    FCMP_UNE = 14,  ///< 1 1 1 0    True if unordered or not equal
-    FCMP_TRUE = 15, ///< 1 1 1 1    Always true (always folded)
+  enum Predicate : unsigned {
+    // Opcode              U L G E    Intuitive operation
+    FCMP_FALSE = 0, ///< 0 0 0 0    Always false (always folded)
+    FCMP_OEQ = 1,   ///< 0 0 0 1    True if ordered and equal
+    FCMP_OGT = 2,   ///< 0 0 1 0    True if ordered and greater than
+    FCMP_OGE = 3,   ///< 0 0 1 1    True if ordered and greater than or equal
+    FCMP_OLT = 4,   ///< 0 1 0 0    True if ordered and less than
+    FCMP_OLE = 5,   ///< 0 1 0 1    True if ordered and less than or equal
+    FCMP_ONE = 6,   ///< 0 1 1 0    True if ordered and operands are unequal
+    FCMP_ORD = 7,   ///< 0 1 1 1    True if ordered (no nans)
+    FCMP_UNO = 8,   ///< 1 0 0 0    True if unordered: isnan(X) | isnan(Y)
+    FCMP_UEQ = 9,   ///< 1 0 0 1    True if unordered or equal
+    FCMP_UGT = 10,  ///< 1 0 1 0    True if unordered or greater than
+    FCMP_UGE = 11,  ///< 1 0 1 1    True if unordered, greater than, or equal
+    FCMP_ULT = 12,  ///< 1 1 0 0    True if unordered or less than
+    FCMP_ULE = 13,  ///< 1 1 0 1    True if unordered, less than, or equal
+    FCMP_UNE = 14,  ///< 1 1 1 0    True if unordered or not equal
+    FCMP_TRUE = 15, ///< 1 1 1 1    Always true (always folded)
     FIRST_FCMP_PREDICATE = FCMP_FALSE,
     LAST_FCMP_PREDICATE = FCMP_TRUE,
     BAD_FCMP_PREDICATE = FCMP_TRUE + 1,
-    ICMP_EQ = 32,  ///< equal
-    ICMP_NE = 33,  ///< not equal
-    ICMP_UGT = 34, ///< unsigned greater than
-    ICMP_UGE = 35, ///< unsigned greater or equal
-    ICMP_ULT = 36, ///< unsigned less than
-    ICMP_ULE = 37, ///< unsigned less or equal
-    ICMP_SGT = 38, ///< signed greater than
-    ICMP_SGE = 39, ///< signed greater or equal
-    ICMP_SLT = 40, ///< signed less than
-    ICMP_SLE = 41, ///< signed less or equal
+    ICMP_EQ = 32,  ///< equal
+    ICMP_NE = 33,  ///< not equal
+    ICMP_UGT = 34, ///< unsigned greater than
+    ICMP_UGE = 35, ///< unsigned greater or equal
+    ICMP_ULT = 36, ///< unsigned less than
+    ICMP_ULE = 37, ///< unsigned less or equal
+    ICMP_SGT = 38, ///< signed greater than
+    ICMP_SGE = 39, ///< signed greater or equal
+    ICMP_SLT = 40, ///< signed less than
+    ICMP_SLE = 41, ///< signed less or equal
     FIRST_ICMP_PREDICATE = ICMP_EQ,
     LAST_ICMP_PREDICATE = ICMP_SLE,
     BAD_ICMP_PREDICATE = ICMP_SLE + 1
   };
+  using PredicateField =
+      Bitfield::Element<Predicate, 0, 6, LAST_ICMP_PREDICATE>; // Next bit:6
 
 protected:
   CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
@@ -797,12 +799,10 @@ class CmpInst : public Instruction {
   }
 
   /// Return the predicate for this instruction.
-  Predicate getPredicate() const {
-    return Predicate(getSubclassDataFromInstruction());
-  }
+  Predicate getPredicate() const { return getSubclassData<PredicateField>(); }
 
   /// Set the predicate for this instruction to the specified value.
-  void setPredicate(Predicate P) { setInstructionSubclassData(P); }
+  void setPredicate(Predicate P) { setSubclassData<PredicateField>(P); }
 
   static bool isFPPredicate(Predicate P) {
     return P >= FIRST_FCMP_PREDICATE && P <= LAST_FCMP_PREDICATE;
@@ -1096,6 +1096,11 @@ using ConstOperandBundleDef = OperandBundleDefT<const Value *>;
 /// subclass requires. Note that accessing the end of the argument list isn't
 /// as cheap as most other operations on the base class.
 class CallBase : public Instruction {
+  // The first two bits are reserved by CallInst for fast retrieving,
+  using CallInstReservedField = Bitfield::Element<unsigned, 0, 2>; // Next bit:2
+  using CallingConvField = Bitfield::Element<CallingConv::ID, 2, 10,
+                                             CallingConv::MaxID>; // Next bit:12
+
 protected:
   /// The last operand is the called operand.
   static constexpr int CalledOperandOpEndIdx = -1;
@@ -1349,14 +1354,11 @@ class CallBase : public Instruction {
   }
 
   CallingConv::ID getCallingConv() const {
-    return static_cast<CallingConv::ID>(getSubclassDataFromInstruction() >> 2);
+    return getSubclassData<CallingConvField>();
   }
 
   void setCallingConv(CallingConv::ID CC) {
-    auto ID = static_cast<unsigned>(CC);
-    assert(!(ID & ~CallingConv::MaxID) && "Unsupported calling convention");
-    setInstructionSubclassData((getSubclassDataFromInstruction() & 3) |
-                               (ID << 2));
+    setSubclassData<CallingConvField>(CC);
   }
 
   /// Check if this call is an inline asm statement.
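
The InstrTypes.h hunks above show the pattern the rest of the patch repeats: a hand-rolled shift-and-mask on the instruction's subclass-data word becomes a named, typed bit range. As a rough standalone sketch of the llvm/ADT/Bitfields.h API this relies on (the Color enum and both field names are invented for illustration; Bitfield::Element, get, set and test are the interfaces the hunks above use):

    #include "llvm/ADT/Bitfields.h"
    #include <cassert>
    #include <cstdint>

    enum class Color : unsigned { Red, Green, Blue, LAST = Blue };

    // Two non-overlapping fields packed into one word: bits 0-1 hold a Color,
    // bit 2 holds a flag. Offset, width and max value live in the type.
    using ColorField = llvm::Bitfield::Element<Color, 0, 2, Color::LAST>;
    using FlagField = llvm::Bitfield::Element<bool, 2, 1>;

    int main() {
      uint16_t Storage = 0;
      llvm::Bitfield::set<ColorField>(Storage, Color::Blue); // bits 0-1 := 2
      llvm::Bitfield::set<FlagField>(Storage, true);         // bit 2 := 1
      assert(llvm::Bitfield::get<ColorField>(Storage) == Color::Blue);
      assert(llvm::Bitfield::test<FlagField>(Storage));
      return 0;
    }

This is why getCallingConv() above no longer needs to know that the calling convention starts at bit 2: the ">> 2" is owned by CallingConvField.
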
diff --git a/llvm/include/llvm/IR/Instruction.h b/llvm/include/llvm/IR/Instruction.h
index e604c6608e8651..ba0f4566c8aaf2 100644
--- a/llvm/include/llvm/IR/Instruction.h
+++ b/llvm/include/llvm/IR/Instruction.h
@@ -15,6 +15,7 @@
 #define LLVM_IR_INSTRUCTION_H
 
 #include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Bitfields.h"
 #include "llvm/ADT/None.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/ADT/ilist_node.h"
@@ -49,11 +50,14 @@ class Instruction : public User,
   /// O(1) local dominance checks between instructions.
   mutable unsigned Order = 0;
 
-  enum {
-    /// This is a bit stored in the SubClassData field which indicates whether
-    /// this instruction has metadata attached to it or not.
-    HasMetadataBit = 1 << 15
-  };
+protected:
+  // The 15 first bits of `Value::SubclassData` are available for subclasses of
+  // `Instruction` to use.
+  using OpaqueField = Bitfield::Element<uint16_t, 0, 15>; // Next bit:15
+private:
+  // The last bit is used to store whether the instruction has metadata attached
+  // or not.
+  using HasMetadataField = Bitfield::Element<bool, 15, 1>;
 
 protected:
   ~Instruction(); // Use deleteValue() to delete a generic Instruction.
@@ -471,7 +475,7 @@ class Instruction : public User,
 private:
   /// Return true if we have an entry in the on-the-side metadata hash.
   bool hasMetadataHashEntry() const {
-    return (getSubclassDataFromValue() & HasMetadataBit) != 0;
+    return Bitfield::test<HasMetadataField>(getSubclassDataFromValue());
   }
 
   // These are all implemented in Metadata.cpp.
@@ -763,10 +767,7 @@ class Instruction : public User,
     return Value::getSubclassDataFromValue();
   }
 
-  void setHasMetadataHashEntry(bool V) {
-    setValueSubclassData((getSubclassDataFromValue() & ~HasMetadataBit) |
-                         (V ? HasMetadataBit : 0));
-  }
+  void setHasMetadataHashEntry(bool V) { setSubclassData<HasMetadataField>(V); }
 
   void setParent(BasicBlock *P);
 
@@ -774,14 +775,24 @@ class Instruction : public User,
   // Instruction subclasses can stick up to 15 bits of stuff into the
   // SubclassData field of instruction with these members.
 
-  // Verify that only the low 15 bits are used.
-  void setInstructionSubclassData(unsigned short D) {
-    assert((D & HasMetadataBit) == 0 && "Out of range value put into field");
-    setValueSubclassData((getSubclassDataFromValue() & HasMetadataBit) | D);
-  }
-
-  unsigned getSubclassDataFromInstruction() const {
-    return getSubclassDataFromValue() & ~HasMetadataBit;
+  template <typename BitfieldElement>
+  typename BitfieldElement::Type getSubclassData() const {
+    static_assert(
+        std::is_same<BitfieldElement, HasMetadataField>::value ||
+            !Bitfield::isOverlapping<BitfieldElement, HasMetadataField>(),
+        "Must not overlap with the metadata bit");
+    return Bitfield::get<BitfieldElement>(getSubclassDataFromValue());
+  }
+
+  template <typename BitfieldElement>
+  void setSubclassData(typename BitfieldElement::Type Value) {
+    static_assert(
+        std::is_same<BitfieldElement, HasMetadataField>::value ||
+            !Bitfield::isOverlapping<BitfieldElement, HasMetadataField>(),
+        "Must not overlap with the metadata bit");
+    auto Storage = getSubclassDataFromValue();
+    Bitfield::set<BitfieldElement>(Storage, Value);
+    setValueSubclassData(Storage);
   }
 
   Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
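
The static_asserts in the new accessors are the safety net for the layout: Bitfield::isOverlapping is a constexpr comparison of two bit ranges, so a subclass field that collides with the metadata bit fails to compile instead of silently corrupting it. A small sketch of the guard (field names invented; only the isOverlapping interface is taken from the hunk above):

    #include "llvm/ADT/Bitfields.h"

    using Low15 = llvm::Bitfield::Element<unsigned, 0, 15>;   // bits 0-14
    using MetaBit = llvm::Bitfield::Element<bool, 15, 1>;     // bit 15
    using Clobber = llvm::Bitfield::Element<unsigned, 14, 2>; // bits 14-15

    static_assert(!llvm::Bitfield::isOverlapping<Low15, MetaBit>(),
                  "disjoint ranges are fine");
    static_assert(llvm::Bitfield::isOverlapping<Clobber, MetaBit>(),
                  "a field straddling bit 15 would be rejected by the guard");
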
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
index caa24f4b1a7880..f719d0c1bee923 100644
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -16,6 +16,7 @@
 #define LLVM_IR_INSTRUCTIONS_H
 
 #include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Bitfields.h"
 #include "llvm/ADT/None.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallVector.h"
@@ -59,6 +60,10 @@ class LLVMContext;
 class AllocaInst : public UnaryInstruction {
   Type *AllocatedType;
 
+  using AlignmentField = Bitfield::Element<unsigned, 0, 5>;    // Next bit:5
+  using UsedWithInAllocaField = Bitfield::Element<bool, 5, 1>; // Next bit:6
+  using SwiftErrorField = Bitfield::Element<bool, 6, 1>;       // Next bit:7
+
 protected:
   // Note: Instruction needs to be a friend here to call cloneImpl.
   friend class Instruction;
@@ -108,7 +113,7 @@ class AllocaInst : public UnaryInstruction {
   /// Return the alignment of the memory that is being allocated by the
   /// instruction.
   Align getAlign() const {
-    return *decodeMaybeAlign(getSubclassDataFromInstruction() & 31);
+    return *decodeMaybeAlign(getSubclassData<AlignmentField>());
   }
   // FIXME: Remove this one transition to Align is over.
   unsigned getAlignment() const { return getAlign().value(); }
@@ -122,25 +127,18 @@ class AllocaInst : public UnaryInstruction {
   /// Return true if this alloca is used as an inalloca argument to a call. Such
   /// allocas are never considered static even if they are in the entry block.
   bool isUsedWithInAlloca() const {
-    return getSubclassDataFromInstruction() & 32;
+    return getSubclassData<UsedWithInAllocaField>();
   }
 
   /// Specify whether this alloca is used to represent the arguments to a call.
   void setUsedWithInAlloca(bool V) {
-    setInstructionSubclassData((getSubclassDataFromInstruction() & ~32) |
-                               (V ? 32 : 0));
+    setSubclassData<UsedWithInAllocaField>(V);
   }
 
   /// Return true if this alloca is used as a swifterror argument to a call.
-  bool isSwiftError() const {
-    return getSubclassDataFromInstruction() & 64;
-  }
-
+  bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
   /// Specify whether this alloca is used to represent a swifterror.
-  void setSwiftError(bool V) {
-    setInstructionSubclassData((getSubclassDataFromInstruction() & ~64) |
-                               (V ? 64 : 0));
-  }
+  void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
 
   // Methods for support type inquiry through isa, cast, and dyn_cast:
   static bool classof(const Instruction *I) {
@@ -153,8 +151,9 @@ class AllocaInst : public UnaryInstruction {
 private:
   // Shadow Instruction::setInstructionSubclassData with a private forwarding
   // method so that subclasses cannot accidentally use it.
-  void setInstructionSubclassData(unsigned short D) {
-    Instruction::setInstructionSubclassData(D);
+  template <typename Bitfield>
+  void setSubclassData(typename Bitfield::Type Value) {
+    Instruction::setSubclassData<Bitfield>(Value);
   }
 };
@@ -165,6 +164,11 @@ class AllocaInst : public UnaryInstruction {
 /// An instruction for reading from memory. This uses the SubclassData field in
 /// Value to store whether or not the load is volatile.
 class LoadInst : public UnaryInstruction {
+  using VolatileField = Bitfield::Element<bool, 0, 1>;      // Next bit:1
+  using AlignmentField = Bitfield::Element<unsigned, 1, 6>; // Next bit:7
+  using OrderingField = Bitfield::Element<AtomicOrdering, 7, 3,
+                                          AtomicOrdering::LAST>; // Next bit:10
+
   void AssertOK();
 
 protected:
@@ -194,13 +198,10 @@ class LoadInst : public UnaryInstruction {
            BasicBlock *InsertAtEnd);
 
   /// Return true if this is a load from a volatile memory location.
-  bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
+  bool isVolatile() const { return getSubclassData<VolatileField>(); }
 
   /// Specify whether this is a volatile load or not.
-  void setVolatile(bool V) {
-    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
-                               (V ? 1 : 0));
-  }
+  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
 
   /// Return the alignment of the access that is being performed.
   /// FIXME: Remove this function once transition to Align is over.
@@ -209,21 +210,19 @@ class LoadInst : public UnaryInstruction {
 
   /// Return the alignment of the access that is being performed.
   Align getAlign() const {
-    return *decodeMaybeAlign((getSubclassDataFromInstruction() >> 1) & 31);
+    return *decodeMaybeAlign(getSubclassData<AlignmentField>());
   }
 
   void setAlignment(Align Alignment);
 
   /// Returns the ordering constraint of this load instruction.
   AtomicOrdering getOrdering() const {
-    return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
+    return getSubclassData<OrderingField>();
   }
-
   /// Sets the ordering constraint of this load instruction. May not be Release
   /// or AcquireRelease.
   void setOrdering(AtomicOrdering Ordering) {
-    setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
-                               ((unsigned)Ordering << 7));
+    setSubclassData<OrderingField>(Ordering);
   }
 
   /// Returns the synchronization scope ID of this load instruction.
@@ -273,8 +272,9 @@ class LoadInst : public UnaryInstruction {
 private:
   // Shadow Instruction::setInstructionSubclassData with a private forwarding
   // method so that subclasses cannot accidentally use it.
-  void setInstructionSubclassData(unsigned short D) {
-    Instruction::setInstructionSubclassData(D);
+  template <typename Bitfield>
+  void setSubclassData(typename Bitfield::Type Value) {
+    Instruction::setSubclassData<Bitfield>(Value);
   }
 
   /// The synchronization scope ID of this load instruction. Not quite enough
@@ -289,6 +289,11 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LoadInst, Value)
 
 /// An instruction for storing to memory.
 class StoreInst : public Instruction {
+  using VolatileField = Bitfield::Element<bool, 0, 1>;      // Next bit:1
+  using AlignmentField = Bitfield::Element<unsigned, 1, 6>; // Next bit:7
+  using OrderingField = Bitfield::Element<AtomicOrdering, 7, 3,
+                                          AtomicOrdering::LAST>; // Next bit:10
+
   void AssertOK();
 
 protected:
@@ -318,13 +323,10 @@ class StoreInst : public Instruction {
   }
 
   /// Return true if this is a store to a volatile memory location.
-  bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
+  bool isVolatile() const { return getSubclassData<VolatileField>(); }
 
   /// Specify whether this is a volatile store or not.
-  void setVolatile(bool V) {
-    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
-                               (V ? 1 : 0));
-  }
+  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
 
   /// Transparently provide more efficient getOperand methods.
   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
@@ -335,21 +337,20 @@ class StoreInst : public Instruction {
   unsigned getAlignment() const { return getAlign().value(); }
 
   Align getAlign() const {
-    return *decodeMaybeAlign((getSubclassDataFromInstruction() >> 1) & 31);
+    return *decodeMaybeAlign(getSubclassData<AlignmentField>());
  }
 
   void setAlignment(Align Alignment);
 
   /// Returns the ordering constraint of this store instruction.
   AtomicOrdering getOrdering() const {
-    return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
+    return getSubclassData<OrderingField>();
   }
 
   /// Sets the ordering constraint of this store instruction. May not be
   /// Acquire or AcquireRelease.
   void setOrdering(AtomicOrdering Ordering) {
-    setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
-                               ((unsigned)Ordering << 7));
+    setSubclassData<OrderingField>(Ordering);
   }
 
   /// Returns the synchronization scope ID of this store instruction.
@@ -402,8 +403,9 @@ class StoreInst : public Instruction {
 private:
   // Shadow Instruction::setInstructionSubclassData with a private forwarding
   // method so that subclasses cannot accidentally use it.
-  void setInstructionSubclassData(unsigned short D) {
-    Instruction::setInstructionSubclassData(D);
+  template <typename Bitfield>
+  void setSubclassData(typename Bitfield::Type Value) {
+    Instruction::setSubclassData<Bitfield>(Value);
   }
   /// The synchronization scope ID of this store instruction. Not quite enough
@@ -424,6 +426,9 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
 
 /// An instruction for ordering other memory operations.
 class FenceInst : public Instruction {
+  using OrderingField = Bitfield::Element<AtomicOrdering, 1, 3,
+                                          AtomicOrdering::LAST>; // Next bit:4
+
   void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
 
 protected:
@@ -448,14 +453,13 @@ class FenceInst : public Instruction {
 
   /// Returns the ordering constraint of this fence instruction.
   AtomicOrdering getOrdering() const {
-    return AtomicOrdering(getSubclassDataFromInstruction() >> 1);
+    return getSubclassData<OrderingField>();
   }
 
   /// Sets the ordering constraint of this fence instruction. May only be
   /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
   void setOrdering(AtomicOrdering Ordering) {
-    setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
-                               ((unsigned)Ordering << 1));
+    setSubclassData<OrderingField>(Ordering);
   }
 
   /// Returns the synchronization scope ID of this fence instruction.
@@ -479,8 +483,9 @@ class FenceInst : public Instruction {
 private:
   // Shadow Instruction::setInstructionSubclassData with a private forwarding
   // method so that subclasses cannot accidentally use it.
-  void setInstructionSubclassData(unsigned short D) {
-    Instruction::setInstructionSubclassData(D);
+  template <typename Bitfield>
+  void setSubclassData(typename Bitfield::Type Value) {
+    Instruction::setSubclassData<Bitfield>(Value);
   }
 
   /// The synchronization scope ID of this fence instruction. Not quite enough
@@ -525,6 +530,16 @@ class AtomicCmpXchgInst : public Instruction {
     return User::operator new(s, 3);
   }
 
+  // FIXME: Reuse bit 1 that was used by `syncscope.`
+  using VolatileField = Bitfield::Element<bool, 0, 1>; // Next bit:1
+  using SuccessOrderingField =
+      Bitfield::Element<AtomicOrdering, 2, 3,
+                        AtomicOrdering::LAST>; // Next bit:5
+  using FailureOrderingField =
+      Bitfield::Element<AtomicOrdering, 5, 3,
+                        AtomicOrdering::LAST>; // Next bit:8
+  using WeakField = Bitfield::Element<bool, 8, 1>; // Next bit:9
+
   /// Always returns the natural type alignment.
   /// FIXME: Introduce a proper alignment
   /// https://bugs.llvm.org/show_bug.cgi?id=27168
@@ -533,54 +548,42 @@ class AtomicCmpXchgInst : public Instruction {
   /// Return true if this is a cmpxchg from a volatile memory
   /// location.
   ///
-  bool isVolatile() const {
-    return getSubclassDataFromInstruction() & 1;
-  }
+  bool isVolatile() const { return getSubclassData<VolatileField>(); }
 
   /// Specify whether this is a volatile cmpxchg.
   ///
-  void setVolatile(bool V) {
-    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
-                               (unsigned)V);
-  }
+  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
 
   /// Return true if this cmpxchg may spuriously fail.
-  bool isWeak() const {
-    return getSubclassDataFromInstruction() & 0x100;
-  }
+  bool isWeak() const { return getSubclassData<WeakField>(); }
 
-  void setWeak(bool IsWeak) {
-    setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x100) |
-                               (IsWeak << 8));
-  }
+  void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
 
   /// Transparently provide more efficient getOperand methods.
   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
 
   /// Returns the success ordering constraint of this cmpxchg instruction.
   AtomicOrdering getSuccessOrdering() const {
-    return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
+    return getSubclassData<SuccessOrderingField>();
   }
 
   /// Sets the success ordering constraint of this cmpxchg instruction.
   void setSuccessOrdering(AtomicOrdering Ordering) {
     assert(Ordering != AtomicOrdering::NotAtomic &&
            "CmpXchg instructions can only be atomic.");
-    setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) |
-                               ((unsigned)Ordering << 2));
+    setSubclassData<SuccessOrderingField>(Ordering);
   }
 
   /// Returns the failure ordering constraint of this cmpxchg instruction.
   AtomicOrdering getFailureOrdering() const {
-    return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7);
+    return getSubclassData<FailureOrderingField>();
   }
 
   /// Sets the failure ordering constraint of this cmpxchg instruction.
   void setFailureOrdering(AtomicOrdering Ordering) {
     assert(Ordering != AtomicOrdering::NotAtomic &&
            "CmpXchg instructions can only be atomic.");
-    setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) |
-                               ((unsigned)Ordering << 5));
+    setSubclassData<FailureOrderingField>(Ordering);
   }
 
   /// Returns the synchronization scope ID of this cmpxchg instruction.
@@ -642,8 +645,9 @@ class AtomicCmpXchgInst : public Instruction {
 private:
   // Shadow Instruction::setInstructionSubclassData with a private forwarding
   // method so that subclasses cannot accidentally use it.
-  void setInstructionSubclassData(unsigned short D) {
-    Instruction::setInstructionSubclassData(D);
+  template <typename Bitfield>
+  void setSubclassData(typename Bitfield::Type Value) {
+    Instruction::setSubclassData<Bitfield>(Value);
   }
 
   /// The synchronization scope ID of this cmpxchg instruction. Not quite
@@ -679,7 +683,7 @@ class AtomicRMWInst : public Instruction {
   /// the descriptions, 'p' is the pointer to the instruction's memory location,
   /// 'old' is the initial value of *p, and 'v' is the other value passed to the
   /// instruction. These instructions always return 'old'.
-  enum BinOp {
+  enum BinOp : unsigned {
     /// *p = v
     Xchg,
     /// *p = old + v
@@ -726,9 +730,15 @@ class AtomicRMWInst : public Instruction {
     return User::operator new(s, 2);
   }
 
-  BinOp getOperation() const {
-    return static_cast<BinOp>(getSubclassDataFromInstruction() >> 5);
-  }
+  // FIXME: Reuse bit 1 that was used by `syncscope.`
+  using VolatileField = Bitfield::Element<bool, 0, 1>; // Next bit:1
+  using AtomicOrderingField =
+      Bitfield::Element<AtomicOrdering, 2, 3,
+                        AtomicOrdering::LAST>; // Next bit:5
+  using OperationField = Bitfield::Element<BinOp, 5, 4,
+                                           BinOp::LAST_BINOP>; // Next bit:9
+
+  BinOp getOperation() const { return getSubclassData<OperationField>(); }
 
   static StringRef getOperationName(BinOp Op);
 
@@ -743,9 +753,7 @@ class AtomicRMWInst : public Instruction {
   }
 
   void setOperation(BinOp Operation) {
-    unsigned short SubclassData = getSubclassDataFromInstruction();
-    setInstructionSubclassData((SubclassData & 31) |
-                               (Operation << 5));
+    setSubclassData<OperationField>(Operation);
   }
 
   /// Always returns the natural type alignment.
@@ -755,31 +763,25 @@ class AtomicRMWInst : public Instruction {
 
   /// Return true if this is a RMW on a volatile memory location.
   ///
-  bool isVolatile() const {
-    return getSubclassDataFromInstruction() & 1;
-  }
+  bool isVolatile() const { return getSubclassData<VolatileField>(); }
 
   /// Specify whether this is a volatile RMW or not.
   ///
-  void setVolatile(bool V) {
-    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
-                               (unsigned)V);
-  }
+  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
 
   /// Transparently provide more efficient getOperand methods.
   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
 
   /// Returns the ordering constraint of this rmw instruction.
   AtomicOrdering getOrdering() const {
-    return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
+    return getSubclassData<AtomicOrderingField>();
   }
 
   /// Sets the ordering constraint of this rmw instruction.
   void setOrdering(AtomicOrdering Ordering) {
     assert(Ordering != AtomicOrdering::NotAtomic &&
            "atomicrmw instructions can only be atomic.");
-    setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) |
-                               ((unsigned)Ordering << 2));
+    setSubclassData<AtomicOrderingField>(Ordering);
   }
 
   /// Returns the synchronization scope ID of this rmw instruction.
@@ -822,8 +824,9 @@ class AtomicRMWInst : public Instruction {
 
   // Shadow Instruction::setInstructionSubclassData with a private forwarding
   // method so that subclasses cannot accidentally use it.
-  void setInstructionSubclassData(unsigned short D) {
-    Instruction::setInstructionSubclassData(D);
+  template <typename Bitfield>
+  void setSubclassData(typename Bitfield::Type Value) {
+    Instruction::setSubclassData<Bitfield>(Value);
   }
 
   /// The synchronization scope ID of this rmw instruction. Not quite enough
@@ -1552,37 +1555,35 @@ class CallInst : public CallBase {
                            BasicBlock *InsertAtEnd);
 
   // Note that 'musttail' implies 'tail'.
-  enum TailCallKind {
+  enum TailCallKind : unsigned {
     TCK_None = 0,
     TCK_Tail = 1,
     TCK_MustTail = 2,
-    TCK_NoTail = 3
+    TCK_NoTail = 3,
+    TCK_LAST = TCK_NoTail
   };
+
+  using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
+
   TailCallKind getTailCallKind() const {
-    return TailCallKind(getSubclassDataFromInstruction() & 3);
+    return getSubclassData<TailCallKindField>();
   }
 
   bool isTailCall() const {
-    unsigned Kind = getSubclassDataFromInstruction() & 3;
+    TailCallKind Kind = getTailCallKind();
     return Kind == TCK_Tail || Kind == TCK_MustTail;
   }
 
-  bool isMustTailCall() const {
-    return (getSubclassDataFromInstruction() & 3) == TCK_MustTail;
-  }
+  bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
 
-  bool isNoTailCall() const {
-    return (getSubclassDataFromInstruction() & 3) == TCK_NoTail;
-  }
+  bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
 
-  void setTailCall(bool isTC = true) {
-    setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
-                               unsigned(isTC ? TCK_Tail : TCK_None));
+  void setTailCallKind(TailCallKind TCK) {
+    setSubclassData<TailCallKindField>(TCK);
   }
 
-  void setTailCallKind(TailCallKind TCK) {
-    setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
-                               unsigned(TCK));
+  void setTailCall(bool IsTc = true) {
+    setTailCallKind(IsTc ? TCK_Tail : TCK_None);
   }
 
   /// Return true if the call can return twice
@@ -1605,8 +1606,9 @@ class CallInst : public CallBase {
 private:
   // Shadow Instruction::setInstructionSubclassData with a private forwarding
   // method so that subclasses cannot accidentally use it.
-  void setInstructionSubclassData(unsigned short D) {
-    Instruction::setInstructionSubclassData(D);
+  template <typename Bitfield>
+  void setSubclassData(typename Bitfield::Type Value) {
+    Instruction::setSubclassData<Bitfield>(Value);
   }
 };
 
@@ -2725,6 +2727,8 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)
 /// cleanup.
 ///
 class LandingPadInst : public Instruction {
+  using CleanupField = Bitfield::Element<bool, 0, 1>;
+
   /// The number of operands actually allocated.  NumOperands is
   /// the number actually in use.
   unsigned ReservedSpace;
@@ -2769,13 +2773,10 @@ class LandingPadInst : public Instruction {
   /// Return 'true' if this landingpad instruction is a
   /// cleanup. I.e., it should be run when unwinding even if its landing pad
   /// doesn't catch the exception.
-  bool isCleanup() const { return getSubclassDataFromInstruction() & 1; }
+  bool isCleanup() const { return getSubclassData<CleanupField>(); }
 
   /// Indicate that this landingpad instruction is a cleanup.
-  void setCleanup(bool V) {
-    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
-                               (V ? 1 : 0));
-  }
+  void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
 
   /// Add a catch or filter clause to the landing pad.
   void addClause(Constant *ClauseVal);
@@ -3762,11 +3763,11 @@ class InvokeInst : public CallBase {
   }
 
 private:
-
   // Shadow Instruction::setInstructionSubclassData with a private forwarding
   // method so that subclasses cannot accidentally use it.
-  void setInstructionSubclassData(unsigned short D) {
-    Instruction::setInstructionSubclassData(D);
+  template <typename Bitfield>
+  void setSubclassData(typename Bitfield::Type Value) {
+    Instruction::setSubclassData<Bitfield>(Value);
   }
 };
 
@@ -4002,11 +4003,11 @@ class CallBrInst : public CallBase {
   }
 
 private:
-
   // Shadow Instruction::setInstructionSubclassData with a private forwarding
   // method so that subclasses cannot accidentally use it.
-  void setInstructionSubclassData(unsigned short D) {
-    Instruction::setInstructionSubclassData(D);
+  template <typename Bitfield>
+  void setSubclassData(typename Bitfield::Type Value) {
+    Instruction::setSubclassData<Bitfield>(Value);
   }
 };
 
@@ -4097,6 +4098,8 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
 //===----------------------------------------------------------------------===//
 //                         CatchSwitchInst Class
 //===----------------------------------------------------------------------===//
 class CatchSwitchInst : public Instruction {
+  using UnwindDestField = Bitfield::Element<bool, 0, 1>; // Next bit:1
+
   /// The number of operands actually allocated.  NumOperands is
   /// the number actually in use.
   unsigned ReservedSpace;
@@ -4158,7 +4161,7 @@ class CatchSwitchInst : public Instruction {
   void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
 
   // Accessor Methods for CatchSwitch stmt
-  bool hasUnwindDest() const { return getSubclassDataFromInstruction() & 1; }
+  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
   bool unwindsToCaller() const { return !hasUnwindDest(); }
   BasicBlock *getUnwindDest() const {
     if (hasUnwindDest())
@@ -4444,6 +4447,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
 //===----------------------------------------------------------------------===//
 
 class CleanupReturnInst : public Instruction {
+  using UnwindDestField = Bitfield::Element<bool, 0, 1>; // Next bit:1
 private:
   CleanupReturnInst(const CleanupReturnInst &RI);
   CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
@@ -4484,7 +4488,7 @@ class CleanupReturnInst : public Instruction {
   /// Provide fast operand accessors
   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
 
-  bool hasUnwindDest() const { return getSubclassDataFromInstruction() & 1; }
+  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
   bool unwindsToCaller() const { return !hasUnwindDest(); }
 
   /// Convenience accessor.
@@ -4528,8 +4532,9 @@ class CleanupReturnInst : public Instruction {
 
   // Shadow Instruction::setInstructionSubclassData with a private forwarding
   // method so that subclasses cannot accidentally use it.
-  void setInstructionSubclassData(unsigned short D) {
-    Instruction::setInstructionSubclassData(D);
+  template <typename Bitfield>
+  void setSubclassData(typename Bitfield::Type Value) {
+    Instruction::setSubclassData<Bitfield>(Value);
   }
 };
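
Every Instructions.h rewrite above has the same shape, so one worked equivalence shows what the mechanical translation preserves. The following is my own illustration rather than code from the patch: it checks that Bitfield::set on LoadInst's 6-bit alignment range at offset 1 produces exactly the bits the old "& ~(31 << 1) | (encode(Align) << 1)" dance produced, without disturbing neighbouring bits (the encoded value 5 stands in for what LLVM's Log2-based encode(Align) is assumed to yield for a 16-byte alignment):

    #include "llvm/ADT/Bitfields.h"
    #include <cassert>
    #include <cstdint>

    using AlignmentField = llvm::Bitfield::Element<unsigned, 1, 6>; // bits 1-6

    int main() {
      unsigned EncodedAlign = 5;  // assumed: Log2(16) + 1
      uint16_t OldStyle = 0x8001; // volatile bit 0 and metadata bit 15 set
      uint16_t NewStyle = 0x8001;

      // Old style: clear the alignment bits, then or-in the shifted value.
      OldStyle = (OldStyle & ~(31u << 1)) | (EncodedAlign << 1);
      // New style: offset and width are owned by the field type.
      llvm::Bitfield::set<AlignmentField>(NewStyle, EncodedAlign);

      assert(OldStyle == NewStyle && "same bits, neighbours untouched");
      return 0;
    }
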
diff --git a/llvm/include/llvm/Support/AtomicOrdering.h b/llvm/include/llvm/Support/AtomicOrdering.h
index 763bc3ea7b282a..a8d89955fa2b5c 100644
--- a/llvm/include/llvm/Support/AtomicOrdering.h
+++ b/llvm/include/llvm/Support/AtomicOrdering.h
@@ -53,7 +53,7 @@ template <typename Int> inline bool isValidAtomicOrderingCABI(Int I) {
 ///
 /// not_atomic-->unordered-->relaxed-->release--------------->acq_rel-->seq_cst
 ///                                   \-->consume-->acquire--/
-enum class AtomicOrdering {
+enum class AtomicOrdering : unsigned {
   NotAtomic = 0,
   Unordered = 1,
   Monotonic = 2, // Equivalent to C++'s relaxed.
@@ -61,7 +61,8 @@ enum class AtomicOrdering {
   Acquire = 4,
   Release = 5,
   AcquireRelease = 6,
-  SequentiallyConsistent = 7
+  SequentiallyConsistent = 7,
+  LAST = SequentiallyConsistent
 };
 
 bool operator<(AtomicOrdering, AtomicOrdering) = delete;
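
The LAST alias is the only change AtomicOrdering needs to be usable as a Bitfield element type: Bitfield::Element's fourth parameter is the largest legal value, which lets the accessors reject out-of-range writes instead of silently spilling into a neighbouring field. A hypothetical declaration mirroring the ones added to Instructions.h above:

    #include "llvm/ADT/Bitfields.h"
    #include "llvm/Support/AtomicOrdering.h"

    // 3 bits at offset 1, capped at SequentiallyConsistent (= 7).
    using OrderingField =
        llvm::Bitfield::Element<llvm::AtomicOrdering, 1, 3,
                                llvm::AtomicOrdering::LAST>;
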
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index b06504742e7dc2..8b333a5e2c0c68 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -960,7 +960,8 @@ CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
                   OperandTraits<CleanupReturnInst>::op_end(this) -
                       CRI.getNumOperands(),
                   CRI.getNumOperands()) {
-  setInstructionSubclassData(CRI.getSubclassDataFromInstruction());
+  setSubclassData<UnwindDestField>(
+      CRI.getSubclassData<UnwindDestField>());
   Op<0>() = CRI.Op<0>();
   if (CRI.hasUnwindDest())
     Op<1>() = CRI.Op<1>();
@@ -968,7 +969,7 @@ CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
 
 void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
   if (UnwindBB)
-    setInstructionSubclassData(getSubclassDataFromInstruction() | 1);
+    setSubclassData<UnwindDestField>(true);
 
   Op<0>() = CleanupPad;
   if (UnwindBB)
@@ -1072,7 +1073,7 @@ void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
 
   Op<0>() = ParentPad;
   if (UnwindDest) {
-    setInstructionSubclassData(getSubclassDataFromInstruction() | 1);
+    setSubclassData<UnwindDestField>(true);
     setUnwindDest(UnwindDest);
   }
 }
@@ -1299,9 +1300,7 @@ AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
 void AllocaInst::setAlignment(Align Align) {
   assert(Align <= MaximumAlignment &&
          "Alignment is greater than MaximumAlignment!");
-  setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) |
-                             encode(Align));
-  assert(getAlignment() == Align.value() && "Alignment representation error!");
+  setSubclassData<AlignmentField>(encode(Align));
 }
 
 bool AllocaInst::isArrayAllocation() const {
@@ -1397,9 +1396,7 @@ LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
 void LoadInst::setAlignment(Align Align) {
   assert(Align <= MaximumAlignment &&
          "Alignment is greater than MaximumAlignment!");
-  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
-                             (encode(Align) << 1));
-  assert(getAlign() == Align && "Alignment representation error!");
+  setSubclassData<AlignmentField>(encode(Align));
 }
 
 //===----------------------------------------------------------------------===//
@@ -1476,9 +1473,7 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
 void StoreInst::setAlignment(Align Alignment) {
   assert(Alignment <= MaximumAlignment &&
          "Alignment is greater than MaximumAlignment!");
-  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
-                             (encode(Alignment) << 1));
-  assert(getAlign() == Alignment && "Alignment representation error!");
+  setSubclassData<AlignmentField>(encode(Alignment));
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 08eda86f1b94d8..dbf4b8003e4dc0 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -4553,7 +4553,7 @@ static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI, SDNode *N,
                                   SelectionDAG &DAG) {
   EVT VT = N->getValueType(0);
   const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
-  int CondCode = CD->getSExtValue();
+  unsigned CondCode = CD->getZExtValue();
   if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
       CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
     return DAG.getUNDEF(VT);
@@ -4590,7 +4590,7 @@ static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI,
   EVT VT = N->getValueType(0);
   const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
 
-  int CondCode = CD->getSExtValue();
+  unsigned CondCode = CD->getZExtValue();
   if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
       CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE) {
     return DAG.getUNDEF(VT);
diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 313fe747e8e123..c911b37afac7eb 100644
--- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -278,28 +278,28 @@ void ThreadSanitizer::initialize(Module &M) {
     TsanAtomicStore[i] = M.getOrInsertFunction(
         AtomicStoreName, Attr, IRB.getVoidTy(), PtrTy, Ty, OrdTy);
 
-    for (int op = AtomicRMWInst::FIRST_BINOP;
-         op <= AtomicRMWInst::LAST_BINOP; ++op) {
-      TsanAtomicRMW[op][i] = nullptr;
+    for (unsigned Op = AtomicRMWInst::FIRST_BINOP;
+         Op <= AtomicRMWInst::LAST_BINOP; ++Op) {
+      TsanAtomicRMW[Op][i] = nullptr;
       const char *NamePart = nullptr;
-      if (op == AtomicRMWInst::Xchg)
+      if (Op == AtomicRMWInst::Xchg)
         NamePart = "_exchange";
-      else if (op == AtomicRMWInst::Add)
+      else if (Op == AtomicRMWInst::Add)
         NamePart = "_fetch_add";
-      else if (op == AtomicRMWInst::Sub)
+      else if (Op == AtomicRMWInst::Sub)
         NamePart = "_fetch_sub";
-      else if (op == AtomicRMWInst::And)
+      else if (Op == AtomicRMWInst::And)
         NamePart = "_fetch_and";
-      else if (op == AtomicRMWInst::Or)
+      else if (Op == AtomicRMWInst::Or)
         NamePart = "_fetch_or";
-      else if (op == AtomicRMWInst::Xor)
+      else if (Op == AtomicRMWInst::Xor)
        NamePart = "_fetch_xor";
-      else if (op == AtomicRMWInst::Nand)
+      else if (Op == AtomicRMWInst::Nand)
         NamePart = "_fetch_nand";
       else
         continue;
      SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
-      TsanAtomicRMW[op][i] =
+      TsanAtomicRMW[Op][i] =
           M.getOrInsertFunction(RMWName, Attr, Ty, PtrTy, Ty, OrdTy);
     }
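
A closing note on the two AMDGPU hunks and the ThreadSanitizer loop: with Predicate and BinOp now carrying an explicit `: unsigned` underlying type, keeping the extracted condition code in a signed int makes every bounds check a mixed signed/unsigned comparison. Reading the immediate with getZExtValue() and range-checking as unsigned keeps the comparison homogeneous; the stand-in enum below (not the real ICmpInst::Predicate) is my sketch of the resulting pattern:

    #include <cstdint>

    enum Pred : unsigned { FIRST_PRED = 32, LAST_PRED = 41 };

    // Zero-extend the raw immediate and compare in unsigned arithmetic, so
    // every out-of-range bit pattern simply fails the bounds test.
    static bool isValidPred(uint64_t Raw) {
      unsigned CondCode = static_cast<unsigned>(Raw);
      return CondCode >= FIRST_PRED && CondCode <= LAST_PRED;
    }

    int main() { return isValidPred(33) ? 0 : 1; }
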