diff --git a/llvm/include/llvm/Support/X86FoldTablesUtils.h b/llvm/include/llvm/Support/X86FoldTablesUtils.h
index c3850588c8522..bddff7068b826 100644
--- a/llvm/include/llvm/Support/X86FoldTablesUtils.h
+++ b/llvm/include/llvm/Support/X86FoldTablesUtils.h
@@ -34,19 +34,18 @@ enum {
   TB_FOLDED_BCAST = 1 << 7,
 
   // Minimum alignment required for load/store.
-  // Used for RegOp->MemOp conversion. Encoded as Log2(Align) + 1 to allow 0
-  // to mean align of 0.
-  // (stored in bits 8 - 11)
+  // Used for RegOp->MemOp conversion. Encoded as Log2(Align)
+  // (stored in bits 8 - 10)
   TB_ALIGN_SHIFT = 8,
-  TB_ALIGN_NONE = 0 << TB_ALIGN_SHIFT,
-  TB_ALIGN_16 = 5 << TB_ALIGN_SHIFT,
-  TB_ALIGN_32 = 6 << TB_ALIGN_SHIFT,
-  TB_ALIGN_64 = 7 << TB_ALIGN_SHIFT,
-  TB_ALIGN_MASK = 0xf << TB_ALIGN_SHIFT,
+  TB_ALIGN_1 = 0 << TB_ALIGN_SHIFT,
+  TB_ALIGN_16 = 4 << TB_ALIGN_SHIFT,
+  TB_ALIGN_32 = 5 << TB_ALIGN_SHIFT,
+  TB_ALIGN_64 = 6 << TB_ALIGN_SHIFT,
+  TB_ALIGN_MASK = 0x7 << TB_ALIGN_SHIFT,
 
   // Broadcast type.
-  // (stored in bits 12 - 13)
-  TB_BCAST_TYPE_SHIFT = 12,
+  // (stored in bits 11 - 13)
+  TB_BCAST_TYPE_SHIFT = TB_ALIGN_SHIFT + 3,
   TB_BCAST_D = 0 << TB_BCAST_TYPE_SHIFT,
   TB_BCAST_Q = 1 << TB_BCAST_TYPE_SHIFT,
   TB_BCAST_SS = 2 << TB_BCAST_TYPE_SHIFT,
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index dbf5e8d535031..8284906bdd2fa 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -6206,9 +6206,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
       isTwoAddrFold || (OpNum == 0 && I->Flags & TB_FOLDED_LOAD) || OpNum > 0;
   bool FoldedStore =
       isTwoAddrFold || (OpNum == 0 && I->Flags & TB_FOLDED_STORE);
-  MaybeAlign MinAlign =
-      decodeMaybeAlign((I->Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT);
-  if (MinAlign && Alignment < *MinAlign)
+  if (Alignment < Align(1 << ((I->Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT)))
     return nullptr;
   bool NarrowToMOV32rm = false;
   if (Size) {
diff --git a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
index dc16c3349c2e3..9fd35484b3f84 100644
--- a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
+++ b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
@@ -77,7 +77,7 @@ class X86FoldTablesEmitter {
     bool IsLoad = false;
     bool IsStore = false;
     bool IsAligned = false;
-    unsigned int Alignment = 0;
+    Align Alignment;
 
     X86FoldTableEntry() = default;
     X86FoldTableEntry(const CodeGenInstruction *RegInst,
@@ -99,7 +99,7 @@ class X86FoldTablesEmitter {
       if (CannotFold)
         Attrs += "TB_NO_FORWARD|";
       if (IsAligned)
-        Attrs += "TB_ALIGN_" + std::to_string(Alignment) + "|";
+        Attrs += "TB_ALIGN_" + std::to_string(Alignment.value()) + "|";
 
       StringRef SimplifiedAttrs = StringRef(Attrs).rtrim("|");
       if (SimplifiedAttrs.empty())
@@ -389,8 +389,7 @@ void X86FoldTablesEmitter::addEntryWithFlags(FoldTable &Table,
     Result.IsLoad = (S & TB_FOLDED_LOAD) != 0;
     Result.IsStore = (S & TB_FOLDED_STORE) != 0;
     Result.IsAligned = (S & TB_ALIGN_MASK) != 0;
-    auto AlignValue = (S & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT;
-    Result.Alignment = AlignValue > 0 ? (1 << (AlignValue - 1)) : 0;
+    Result.Alignment = Align(1 << ((S & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT));
     Table[RegInstr] = Result;
     return;
   }
@@ -443,13 +442,13 @@ void X86FoldTablesEmitter::addEntryWithFlags(FoldTable &Table,
     // The instruction require explicitly aligned memory.
     BitsInit *VectSize = RegRec->getValueAsBitsInit("VectSize");
     Result.IsAligned = true;
-    Result.Alignment = byteFromBitsInit(VectSize);
+    Result.Alignment = Align(byteFromBitsInit(VectSize));
   } else if (!Enc && !isExplicitUnalign(RegInstr) &&
              getMemOperandSize(MemOpRec) > 64) {
     // Instructions with XOP/VEX/EVEX encoding do not require alignment while
     // SSE packed vector instructions require a 16 byte alignment.
     Result.IsAligned = true;
-    Result.Alignment = 16;
+    Result.Alignment = Align(16);
   }
   // Expand is only ever created as a masked instruction. It is not safe to
   // unfold a masked expand because we don't know if it came from an expand load