35 changes: 17 additions & 18 deletions llvm/include/llvm/CodeGen/MachineRegisterInfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
#include "llvm/ADT/iterator_range.h"
// PointerUnion needs to have access to the full RegisterBank type.
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/Target/TargetRegisterInfo.h"
Expand Down Expand Up @@ -104,16 +105,16 @@ class MachineRegisterInfo {
/// started.
BitVector ReservedRegs;

typedef DenseMap<unsigned, unsigned> VRegToSizeMap;
typedef DenseMap<unsigned, LLT> VRegToTypeMap;
/// Map generic virtual registers to their actual size.
mutable std::unique_ptr<VRegToSizeMap> VRegToSize;
mutable std::unique_ptr<VRegToTypeMap> VRegToType;

/// Accessor for VRegToSize. This accessor should only be used
/// Accessor for VRegToType. This accessor should only be used
/// by global-isel related work.
VRegToSizeMap &getVRegToSize() const {
if (!VRegToSize)
VRegToSize.reset(new VRegToSizeMap);
return *VRegToSize.get();
VRegToTypeMap &getVRegToType() const {
if (!VRegToType)
VRegToType.reset(new VRegToTypeMap);
return *VRegToType.get();
}

/// Keep track of the physical registers that are live in to the function.
Expand Down Expand Up @@ -641,22 +642,20 @@ class MachineRegisterInfo {
///
unsigned createVirtualRegister(const TargetRegisterClass *RegClass);

/// Get the size in bits of \p VReg or 0 if VReg is not a generic
/// Get the low-level type of \p VReg or LLT{} if VReg is not a generic
/// (target independent) virtual register.
unsigned getSize(unsigned VReg) const;
LLT getType(unsigned VReg) const;

/// Set the size in bits of \p VReg to \p Size.
/// Although the size should be set at build time, mir infrastructure
/// is not yet able to do it.
void setSize(unsigned VReg, unsigned Size);
/// Set the low-level type of \p VReg to \p Ty.
void setType(unsigned VReg, LLT Ty);

/// Create and return a new generic virtual register with a size of \p Size.
/// \pre Size > 0.
unsigned createGenericVirtualRegister(unsigned Size);
/// Create and return a new generic virtual register with low-level
/// type \p Ty.
unsigned createGenericVirtualRegister(LLT Ty);

/// Remove all sizes associated to virtual registers (after instruction
/// Remove all types associated to virtual registers (after instruction
/// selection and constraining of all generic virtual registers).
void clearVirtRegSizes();
void clearVirtRegTypes();

/// getNumVirtRegs - Return the number of virtual registers created.
///
Expand Down
26 changes: 25 additions & 1 deletion llvm/include/llvm/MC/MCInstrDesc.h
Original file line number Diff line number Diff line change
Expand Up @@ -47,8 +47,22 @@ enum OperandType {
OPERAND_REGISTER = 2,
OPERAND_MEMORY = 3,
OPERAND_PCREL = 4,
OPERAND_FIRST_TARGET = 5

OPERAND_FIRST_GENERIC = 6,
OPERAND_GENERIC_0 = 6,
OPERAND_GENERIC_1 = 7,
OPERAND_GENERIC_2 = 8,
OPERAND_GENERIC_3 = 9,
OPERAND_GENERIC_4 = 10,
OPERAND_GENERIC_5 = 11,
OPERAND_LAST_GENERIC = 11,

OPERAND_FIRST_TARGET = 12,
};

enum GenericOperandType {
};

}

/// \brief This holds information about one operand of a machine instruction,
Expand Down Expand Up @@ -83,6 +97,16 @@ class MCOperandInfo {

/// \brief Set if this operand is an optional def.
bool isOptionalDef() const { return Flags & (1 << MCOI::OptionalDef); }

/// \brief Returns true if this operand is a generic operand type, i.e. its
/// OperandType value falls in the
/// [OPERAND_FIRST_GENERIC, OPERAND_LAST_GENERIC] range of the MCOI enum.
bool isGenericType() const {
return OperandType >= MCOI::OPERAND_FIRST_GENERIC &&
OperandType <= MCOI::OPERAND_LAST_GENERIC;
}

/// \brief Returns the zero-based index of this generic operand type within
/// the generic range (OPERAND_GENERIC_0 -> 0, OPERAND_GENERIC_1 -> 1, ...).
/// Asserts that the operand is in fact generic; non-generic operand types
/// have no index.
unsigned getGenericTypeIndex() const {
assert(isGenericType() && "non-generic types don't have an index");
return OperandType - MCOI::OPERAND_FIRST_GENERIC;
}
};

//===----------------------------------------------------------------------===//
Expand Down
184 changes: 92 additions & 92 deletions llvm/include/llvm/Target/GenericOpcodes.td

Large diffs are not rendered by default.

14 changes: 14 additions & 0 deletions llvm/include/llvm/Target/Target.td
Original file line number Diff line number Diff line change
Expand Up @@ -700,6 +700,20 @@ def f32imm : Operand<f32>;
def f64imm : Operand<f64>;
}

// Register operands for generic instructions don't have an MVT, but do have
// constraints linking the operands (e.g. all operands of a G_ADD must
// have the same LLT).
class TypedOperand<string Ty> : Operand<untyped> {
let OperandType = Ty;
}

def type0 : TypedOperand<"OPERAND_GENERIC_0">;
def type1 : TypedOperand<"OPERAND_GENERIC_1">;
def type2 : TypedOperand<"OPERAND_GENERIC_2">;
def type3 : TypedOperand<"OPERAND_GENERIC_3">;
def type4 : TypedOperand<"OPERAND_GENERIC_4">;
def type5 : TypedOperand<"OPERAND_GENERIC_5">;

/// zero_reg definition - Special node to stand for the zero register.
///
def zero_reg;
Expand Down
72 changes: 26 additions & 46 deletions llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -55,8 +55,7 @@ unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
// we need to concat together to produce the value.
assert(Val.getType()->isSized() &&
"Don't know how to create an empty vreg");
unsigned Size = DL->getTypeSizeInBits(Val.getType());
unsigned VReg = MRI->createGenericVirtualRegister(Size);
unsigned VReg = MRI->createGenericVirtualRegister(LLT{*Val.getType(), DL});
ValReg = VReg;

if (auto CV = dyn_cast<Constant>(&Val)) {
Expand Down Expand Up @@ -113,10 +112,7 @@ bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U) {
unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
unsigned Res = getOrCreateVReg(U);
MIRBuilder.buildInstr(Opcode, LLT{*U.getType()})
.addDef(Res)
.addUse(Op0)
.addUse(Op1);
MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
return true;
}

Expand All @@ -130,13 +126,9 @@ bool IRTranslator::translateCompare(const User &U) {
cast<ConstantExpr>(U).getPredicate());

if (CmpInst::isIntPredicate(Pred))
MIRBuilder.buildICmp(
{LLT{*U.getType()}, LLT{*U.getOperand(0)->getType()}}, Pred, Res, Op0,
Op1);
MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
else
MIRBuilder.buildFCmp(
{LLT{*U.getType()}, LLT{*U.getOperand(0)->getType()}}, Pred, Res, Op0,
Op1);
MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);

return true;
}
Expand All @@ -158,7 +150,7 @@ bool IRTranslator::translateBr(const User &U) {
unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
MachineBasicBlock &TrueBB = getOrCreateBB(TrueTgt);
MIRBuilder.buildBrCond(LLT{*BrInst.getCondition()->getType()}, Tst, TrueBB);
MIRBuilder.buildBrCond(Tst, TrueBB);
}

const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
Expand Down Expand Up @@ -186,7 +178,7 @@ bool IRTranslator::translateLoad(const User &U) {
LLT VTy{*LI.getType(), DL}, PTy{*LI.getPointerOperand()->getType()};

MIRBuilder.buildLoad(
VTy, PTy, Res, Addr,
Res, Addr,
*MF.getMachineMemOperand(
MachinePointerInfo(LI.getPointerOperand()), MachineMemOperand::MOLoad,
DL->getTypeStoreSize(LI.getType()), getMemOpAlignment(LI)));
Expand All @@ -208,7 +200,7 @@ bool IRTranslator::translateStore(const User &U) {
PTy{*SI.getPointerOperand()->getType()};

MIRBuilder.buildStore(
VTy, PTy, Val, Addr,
Val, Addr,
*MF.getMachineMemOperand(
MachinePointerInfo(SI.getPointerOperand()),
MachineMemOperand::MOStore,
Expand Down Expand Up @@ -237,8 +229,7 @@ bool IRTranslator::translateExtractValue(const User &U) {
uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

unsigned Res = getOrCreateVReg(U);
MIRBuilder.buildExtract(LLT{*U.getType(), DL}, Res, Offset,
LLT{*Src->getType(), DL}, getOrCreateVReg(*Src));
MIRBuilder.buildExtract(Res, Offset, getOrCreateVReg(*Src));

return true;
}
Expand All @@ -264,17 +255,16 @@ bool IRTranslator::translateInsertValue(const User &U) {

unsigned Res = getOrCreateVReg(U);
const Value &Inserted = *U.getOperand(1);
MIRBuilder.buildInsert(LLT{*U.getType(), DL}, Res, getOrCreateVReg(*Src),
LLT{*Inserted.getType(), DL},
getOrCreateVReg(Inserted), Offset);
MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), getOrCreateVReg(Inserted),
Offset);

return true;
}

bool IRTranslator::translateSelect(const User &U) {
MIRBuilder.buildSelect(
LLT{*U.getType()}, getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
getOrCreateVReg(*U.getOperand(1)), getOrCreateVReg(*U.getOperand(2)));
MIRBuilder.buildSelect(getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
getOrCreateVReg(*U.getOperand(1)),
getOrCreateVReg(*U.getOperand(2)));
return true;
}

Expand All @@ -293,10 +283,7 @@ bool IRTranslator::translateBitCast(const User &U) {
bool IRTranslator::translateCast(unsigned Opcode, const User &U) {
unsigned Op = getOrCreateVReg(*U.getOperand(0));
unsigned Res = getOrCreateVReg(U);
MIRBuilder
.buildInstr(Opcode, {LLT{*U.getType()}, LLT{*U.getOperand(0)->getType()}})
.addDef(Res)
.addUse(Op);
MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
return true;
}

Expand All @@ -316,22 +303,21 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI,
LLT Ty{*CI.getOperand(0)->getType()};
LLT s1 = LLT::scalar(1);
unsigned Width = Ty.getSizeInBits();
unsigned Res = MRI->createGenericVirtualRegister(Width);
unsigned Overflow = MRI->createGenericVirtualRegister(1);
auto MIB = MIRBuilder.buildInstr(Op, {Ty, s1})
unsigned Res = MRI->createGenericVirtualRegister(Ty);
unsigned Overflow = MRI->createGenericVirtualRegister(s1);
auto MIB = MIRBuilder.buildInstr(Op)
.addDef(Res)
.addDef(Overflow)
.addUse(getOrCreateVReg(*CI.getOperand(0)))
.addUse(getOrCreateVReg(*CI.getOperand(1)));

if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
unsigned Zero = MRI->createGenericVirtualRegister(1);
EntryBuilder.buildConstant(s1, Zero, 0);
unsigned Zero = MRI->createGenericVirtualRegister(s1);
EntryBuilder.buildConstant(Zero, 0);
MIB.addUse(Zero);
}

MIRBuilder.buildSequence(LLT{*CI.getType(), DL}, getOrCreateVReg(CI), Ty, Res,
0, s1, Overflow, Width);
MIRBuilder.buildSequence(getOrCreateVReg(CI), Res, 0, Overflow, Width);
return true;
}

Expand Down Expand Up @@ -361,15 +347,9 @@ bool IRTranslator::translateCall(const User &U) {
if (translateKnownIntrinsic(CI, ID))
return true;

// Need types (starting with return) & args.
SmallVector<LLT, 4> Tys;
Tys.emplace_back(*CI.getType());
for (auto &Arg : CI.arg_operands())
Tys.emplace_back(*Arg->getType());

unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
MachineInstrBuilder MIB =
MIRBuilder.buildIntrinsic(Tys, ID, Res, !CI.doesNotAccessMemory());
MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

for (auto &Arg : CI.arg_operands()) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg))
Expand Down Expand Up @@ -399,13 +379,13 @@ bool IRTranslator::translateStaticAlloca(const AllocaInst &AI) {

unsigned Res = getOrCreateVReg(AI);
int FI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
MIRBuilder.buildFrameIndex(LLT::pointer(0), Res, FI);
MIRBuilder.buildFrameIndex(Res, FI);
return true;
}

bool IRTranslator::translatePHI(const User &U) {
const PHINode &PI = cast<PHINode>(U);
auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, LLT{*U.getType()});
auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI);
MIB.addDef(getOrCreateVReg(PI));

PendingPHIs.emplace_back(&PI, MIB.getInstr());
Expand Down Expand Up @@ -447,13 +427,13 @@ bool IRTranslator::translate(const Instruction &Inst) {

bool IRTranslator::translate(const Constant &C, unsigned Reg) {
if (auto CI = dyn_cast<ConstantInt>(&C))
EntryBuilder.buildConstant(LLT{*CI->getType()}, Reg, CI->getZExtValue());
EntryBuilder.buildConstant(Reg, CI->getZExtValue());
else if (auto CF = dyn_cast<ConstantFP>(&C))
EntryBuilder.buildFConstant(LLT{*CF->getType()}, Reg, *CF);
EntryBuilder.buildFConstant(Reg, *CF);
else if (isa<UndefValue>(C))
EntryBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF).addDef(Reg);
else if (isa<ConstantPointerNull>(C))
EntryBuilder.buildInstr(TargetOpcode::G_CONSTANT, LLT{*C.getType()})
EntryBuilder.buildInstr(TargetOpcode::G_CONSTANT)
.addDef(Reg)
.addImm(0);
else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
Expand Down
5 changes: 3 additions & 2 deletions llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -75,10 +75,11 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
// The RegBankSelected property is already checked in the verifier. Note
// that it has the same layering problem, but we only use inline methods so
// end up not needing to link against the GlobalISel library.
const MachineRegisterInfo &MRI = MF.getRegInfo();
if (const MachineLegalizer *MLI = MF.getSubtarget().getMachineLegalizer())
for (const MachineBasicBlock &MBB : MF)
for (const MachineInstr &MI : MBB)
if (isPreISelGenericOpcode(MI.getOpcode()) && !MLI->isLegal(MI))
if (isPreISelGenericOpcode(MI.getOpcode()) && !MLI->isLegal(MI, MRI))
reportSelectionError(MI, "Instruction is not legal");

#endif
Expand Down Expand Up @@ -118,7 +119,7 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
// the vreg instead, but that's not ideal either, because it's saying that
// vregs have types, which they really don't. But then again, LLT is just
// a size and a "shape": it's probably the same information as regbank info.
MF.getRegInfo().clearVirtRegSizes();
MF.getRegInfo().clearVirtRegTypes();

// FIXME: Should we accurately track changes?
return true;
Expand Down
167 changes: 68 additions & 99 deletions llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetOpcodes.h"
#include "llvm/Target/TargetSubtargetInfo.h"
Expand All @@ -23,6 +24,7 @@ using namespace llvm;
void MachineIRBuilder::setMF(MachineFunction &MF) {
this->MF = &MF;
this->MBB = nullptr;
this->MRI = &MF.getRegInfo();
this->TII = MF.getSubtarget().getInstrInfo();
this->DL = DebugLoc();
this->MI = nullptr;
Expand Down Expand Up @@ -67,145 +69,122 @@ void MachineIRBuilder::stopRecordingInsertions() {
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opcode,
ArrayRef<LLT> Tys) {
MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opcode) {
MachineInstrBuilder MIB = BuildMI(getMF(), DL, getTII().get(Opcode));
if (Tys.size() > 0) {
assert(isPreISelGenericOpcode(Opcode) &&
"Only generic instruction can have a type");
for (unsigned i = 0; i < Tys.size(); ++i)
MIB->setType(Tys[i], i);
} else
assert(!isPreISelGenericOpcode(Opcode) &&
"Generic instruction must have a type");
getMBB().insert(getInsertPt(), MIB);
if (InsertedInstr)
InsertedInstr(MIB);
return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(LLT Ty, unsigned Res,
int Idx) {
return buildInstr(TargetOpcode::G_FRAME_INDEX, Ty)
MachineInstrBuilder MachineIRBuilder::buildFrameIndex(unsigned Res, int Idx) {
return buildInstr(TargetOpcode::G_FRAME_INDEX)
.addDef(Res)
.addFrameIndex(Idx);
}

MachineInstrBuilder MachineIRBuilder::buildAdd(LLT Ty, unsigned Res,
unsigned Op0, unsigned Op1) {
return buildInstr(TargetOpcode::G_ADD, Ty)
MachineInstrBuilder MachineIRBuilder::buildAdd(unsigned Res, unsigned Op0,
unsigned Op1) {
return buildInstr(TargetOpcode::G_ADD)
.addDef(Res)
.addUse(Op0)
.addUse(Op1);
}

MachineInstrBuilder MachineIRBuilder::buildSub(LLT Ty, unsigned Res,
unsigned Op0, unsigned Op1) {
return buildInstr(TargetOpcode::G_SUB, Ty)
MachineInstrBuilder MachineIRBuilder::buildSub(unsigned Res, unsigned Op0,
unsigned Op1) {
return buildInstr(TargetOpcode::G_SUB)
.addDef(Res)
.addUse(Op0)
.addUse(Op1);
}

MachineInstrBuilder MachineIRBuilder::buildMul(LLT Ty, unsigned Res,
unsigned Op0, unsigned Op1) {
return buildInstr(TargetOpcode::G_MUL, Ty)
MachineInstrBuilder MachineIRBuilder::buildMul(unsigned Res, unsigned Op0,
unsigned Op1) {
return buildInstr(TargetOpcode::G_MUL)
.addDef(Res)
.addUse(Op0)
.addUse(Op1);
}

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
return buildInstr(TargetOpcode::G_BR, LLT::unsized()).addMBB(&Dest);
return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

/// Build and insert a COPY instruction (Res = COPY Op) at the builder's
/// current insertion point, returning the instruction builder for further
/// operand additions.
MachineInstrBuilder MachineIRBuilder::buildCopy(unsigned Res, unsigned Op) {
return buildInstr(TargetOpcode::COPY).addDef(Res).addUse(Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(LLT Ty, unsigned Res,
int64_t Val) {
return buildInstr(TargetOpcode::G_CONSTANT, Ty).addDef(Res).addImm(Val);
MachineInstrBuilder MachineIRBuilder::buildConstant(unsigned Res, int64_t Val) {
return buildInstr(TargetOpcode::G_CONSTANT).addDef(Res).addImm(Val);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(LLT Ty, unsigned Res,
const ConstantFP &Val) {
return buildInstr(TargetOpcode::G_FCONSTANT, Ty).addDef(Res).addFPImm(&Val);
MachineInstrBuilder MachineIRBuilder::buildFConstant(unsigned Res,
const ConstantFP &Val) {
return buildInstr(TargetOpcode::G_FCONSTANT).addDef(Res).addFPImm(&Val);
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(LLT Ty, unsigned Tst,
MachineInstrBuilder MachineIRBuilder::buildBrCond(unsigned Tst,
MachineBasicBlock &Dest) {
return buildInstr(TargetOpcode::G_BRCOND, Ty).addUse(Tst).addMBB(&Dest);
return buildInstr(TargetOpcode::G_BRCOND).addUse(Tst).addMBB(&Dest);
}


MachineInstrBuilder MachineIRBuilder::buildLoad(LLT VTy, LLT PTy, unsigned Res,
unsigned Addr,
MachineMemOperand &MMO) {
return buildInstr(TargetOpcode::G_LOAD, {VTy, PTy})
MachineInstrBuilder MachineIRBuilder::buildLoad(unsigned Res, unsigned Addr,
MachineMemOperand &MMO) {
return buildInstr(TargetOpcode::G_LOAD)
.addDef(Res)
.addUse(Addr)
.addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildStore(LLT VTy, LLT PTy,
unsigned Val, unsigned Addr,
MachineMemOperand &MMO) {
return buildInstr(TargetOpcode::G_STORE, {VTy, PTy})
MachineInstrBuilder MachineIRBuilder::buildStore(unsigned Val, unsigned Addr,
MachineMemOperand &MMO) {
return buildInstr(TargetOpcode::G_STORE)
.addUse(Val)
.addUse(Addr)
.addMemOperand(&MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildUAdde(ArrayRef<LLT> Tys, unsigned Res, unsigned CarryOut,
unsigned Op0, unsigned Op1, unsigned CarryIn) {
return buildInstr(TargetOpcode::G_UADDE, Tys)
MachineInstrBuilder MachineIRBuilder::buildUAdde(unsigned Res,
unsigned CarryOut,
unsigned Op0, unsigned Op1,
unsigned CarryIn) {
return buildInstr(TargetOpcode::G_UADDE)
.addDef(Res)
.addDef(CarryOut)
.addUse(Op0)
.addUse(Op1)
.addUse(CarryIn);
}

MachineInstrBuilder MachineIRBuilder::buildType(LLT Ty,
unsigned Res, unsigned Op) {
return buildInstr(TargetOpcode::G_TYPE, Ty).addDef(Res).addUse(Op);
MachineInstrBuilder MachineIRBuilder::buildType(unsigned Res, unsigned Op) {
return buildInstr(TargetOpcode::G_TYPE).addDef(Res).addUse(Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(ArrayRef<LLT> Tys,
unsigned Res, unsigned Op) {
validateTruncExt(Tys, true);
return buildInstr(TargetOpcode::G_ANYEXT, Tys).addDef(Res).addUse(Op);
MachineInstrBuilder MachineIRBuilder::buildAnyExt(unsigned Res, unsigned Op) {
validateTruncExt(Res, Op, true);
return buildInstr(TargetOpcode::G_ANYEXT).addDef(Res).addUse(Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(ArrayRef<LLT> Tys, unsigned Res,
unsigned Op) {
validateTruncExt(Tys, true);
return buildInstr(TargetOpcode::G_SEXT, Tys).addDef(Res).addUse(Op);
MachineInstrBuilder MachineIRBuilder::buildSExt(unsigned Res, unsigned Op) {
validateTruncExt(Res, Op, true);
return buildInstr(TargetOpcode::G_SEXT).addDef(Res).addUse(Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(ArrayRef<LLT> Tys, unsigned Res,
unsigned Op) {
validateTruncExt(Tys, true);
return buildInstr(TargetOpcode::G_ZEXT, Tys).addDef(Res).addUse(Op);
MachineInstrBuilder MachineIRBuilder::buildZExt(unsigned Res, unsigned Op) {
validateTruncExt(Res, Op, true);
return buildInstr(TargetOpcode::G_ZEXT).addDef(Res).addUse(Op);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(ArrayRef<LLT> ResTys,
ArrayRef<unsigned> Results,
MachineInstrBuilder MachineIRBuilder::buildExtract(ArrayRef<unsigned> Results,
ArrayRef<uint64_t> Indices,
LLT SrcTy, unsigned Src) {
assert(ResTys.size() == Results.size() && Results.size() == Indices.size() &&
"inconsistent number of regs");
unsigned Src) {
assert(Results.size() == Indices.size() && "inconsistent number of regs");
assert(!Results.empty() && "invalid trivial extract");
assert(std::is_sorted(Indices.begin(), Indices.end()) &&
"extract offsets must be in ascending order");

auto MIB = BuildMI(getMF(), DL, getTII().get(TargetOpcode::G_EXTRACT));
for (unsigned i = 0; i < ResTys.size(); ++i)
MIB->setType(LLT::scalar(ResTys[i].getSizeInBits()), i);
MIB->setType(LLT::scalar(SrcTy.getSizeInBits()), ResTys.size());

for (auto Res : Results)
MIB.addDef(Res);

Expand All @@ -222,89 +201,79 @@ MachineInstrBuilder MachineIRBuilder::buildExtract(ArrayRef<LLT> ResTys,
}

MachineInstrBuilder
MachineIRBuilder::buildSequence(LLT ResTy, unsigned Res,
ArrayRef<LLT> OpTys,
MachineIRBuilder::buildSequence(unsigned Res,
ArrayRef<unsigned> Ops,
ArrayRef<unsigned> Indices) {
assert(OpTys.size() == Ops.size() && Ops.size() == Indices.size() &&
"incompatible args");
assert(Ops.size() == Indices.size() && "incompatible args");
assert(!Ops.empty() && "invalid trivial sequence");
assert(std::is_sorted(Indices.begin(), Indices.end()) &&
"sequence offsets must be in ascending order");

MachineInstrBuilder MIB =
buildInstr(TargetOpcode::G_SEQUENCE, LLT::scalar(ResTy.getSizeInBits()));
MachineInstrBuilder MIB = buildInstr(TargetOpcode::G_SEQUENCE);
MIB.addDef(Res);
for (unsigned i = 0; i < Ops.size(); ++i) {
MIB.addUse(Ops[i]);
MIB.addImm(Indices[i]);
MIB->setType(LLT::scalar(OpTys[i].getSizeInBits()), MIB->getNumTypes());
}
return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(ArrayRef<LLT> Tys,
Intrinsic::ID ID,
MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
unsigned Res,
bool HasSideEffects) {
auto MIB =
buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
: TargetOpcode::G_INTRINSIC,
Tys);
: TargetOpcode::G_INTRINSIC);
if (Res)
MIB.addDef(Res);
MIB.addIntrinsicID(ID);
return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(ArrayRef<LLT> Tys,
unsigned Res, unsigned Op) {
validateTruncExt(Tys, false);
return buildInstr(TargetOpcode::G_TRUNC, Tys).addDef(Res).addUse(Op);
MachineInstrBuilder MachineIRBuilder::buildTrunc(unsigned Res, unsigned Op) {
validateTruncExt(Res, Op, false);
return buildInstr(TargetOpcode::G_TRUNC).addDef(Res).addUse(Op);
}

MachineInstrBuilder MachineIRBuilder::buildFPTrunc(ArrayRef<LLT> Tys,
unsigned Res, unsigned Op) {
validateTruncExt(Tys, false);
return buildInstr(TargetOpcode::G_FPTRUNC, Tys).addDef(Res).addUse(Op);
MachineInstrBuilder MachineIRBuilder::buildFPTrunc(unsigned Res, unsigned Op) {
validateTruncExt(Res, Op, false);
return buildInstr(TargetOpcode::G_FPTRUNC).addDef(Res).addUse(Op);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(ArrayRef<LLT> Tys,
CmpInst::Predicate Pred,
MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
unsigned Res, unsigned Op0,
unsigned Op1) {
return buildInstr(TargetOpcode::G_ICMP, Tys)
return buildInstr(TargetOpcode::G_ICMP)
.addDef(Res)
.addPredicate(Pred)
.addUse(Op0)
.addUse(Op1);
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(ArrayRef<LLT> Tys,
CmpInst::Predicate Pred,
MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
unsigned Res, unsigned Op0,
unsigned Op1) {
return buildInstr(TargetOpcode::G_FCMP, Tys)
return buildInstr(TargetOpcode::G_FCMP)
.addDef(Res)
.addPredicate(Pred)
.addUse(Op0)
.addUse(Op1);
}

MachineInstrBuilder MachineIRBuilder::buildSelect(LLT Ty, unsigned Res,
unsigned Tst,
MachineInstrBuilder MachineIRBuilder::buildSelect(unsigned Res, unsigned Tst,
unsigned Op0, unsigned Op1) {
return buildInstr(TargetOpcode::G_SELECT, {Ty, LLT::scalar(1)})
return buildInstr(TargetOpcode::G_SELECT)
.addDef(Res)
.addUse(Tst)
.addUse(Op0)
.addUse(Op1);
}

void MachineIRBuilder::validateTruncExt(ArrayRef<LLT> Tys, bool IsExtend) {
void MachineIRBuilder::validateTruncExt(unsigned Dst, unsigned Src,
bool IsExtend) {
#ifndef NDEBUG
assert(Tys.size() == 2 && "cast should have a source and a dest type");
LLT DstTy{Tys[0]}, SrcTy{Tys[1]};
LLT SrcTy = MRI->getType(Src);
LLT DstTy = MRI->getType(Dst);

if (DstTy.isVector()) {
assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
Expand Down
152 changes: 70 additions & 82 deletions llvm/lib/CodeGen/GlobalISel/MachineLegalizeHelper.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ MachineLegalizeHelper::MachineLegalizeHelper(MachineFunction &MF)
MachineLegalizeHelper::LegalizeResult
MachineLegalizeHelper::legalizeInstrStep(MachineInstr &MI,
const MachineLegalizer &Legalizer) {
auto Action = Legalizer.getAction(MI);
auto Action = Legalizer.getAction(MI, MRI);
switch (std::get<0>(Action)) {
case MachineLegalizer::Legal:
return AlreadyLegal;
Expand Down Expand Up @@ -85,19 +85,17 @@ void MachineLegalizeHelper::extractParts(unsigned Reg, LLT Ty, int NumParts,
SmallVectorImpl<unsigned> &VRegs) {
unsigned Size = Ty.getSizeInBits();
SmallVector<uint64_t, 4> Indexes;
SmallVector<LLT, 4> ResTys;
for (int i = 0; i < NumParts; ++i) {
VRegs.push_back(MRI.createGenericVirtualRegister(Size));
VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
Indexes.push_back(i * Size);
ResTys.push_back(Ty);
}
MIRBuilder.buildExtract(ResTys, VRegs, Indexes,
LLT::scalar(Ty.getSizeInBits() * NumParts), Reg);
MIRBuilder.buildExtract(VRegs, Indexes, Reg);
}

MachineLegalizeHelper::LegalizeResult
MachineLegalizeHelper::libcall(MachineInstr &MI) {
unsigned Size = MI.getType().getSizeInBits();
LLT Ty = MRI.getType(MI.getOperand(0).getReg());
unsigned Size = Ty.getSizeInBits();
MIRBuilder.setInstr(MI);

switch (MI.getOpcode()) {
Expand Down Expand Up @@ -132,32 +130,31 @@ MachineLegalizeHelper::narrowScalar(MachineInstr &MI, unsigned TypeIdx,
case TargetOpcode::G_ADD: {
// Expand in terms of carry-setting/consuming G_ADDE instructions.
unsigned NarrowSize = NarrowTy.getSizeInBits();
int NumParts = MI.getType().getSizeInBits() / NarrowSize;
int NumParts = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits() /
NarrowTy.getSizeInBits();

MIRBuilder.setInstr(MI);

SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs, Indexes;
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);

unsigned CarryIn = MRI.createGenericVirtualRegister(1);
MIRBuilder.buildConstant(LLT::scalar(1), CarryIn, 0);
unsigned CarryIn = MRI.createGenericVirtualRegister(LLT::scalar(1));
MIRBuilder.buildConstant(CarryIn, 0);

SmallVector<LLT, 2> DstTys;
for (int i = 0; i < NumParts; ++i) {
unsigned DstReg = MRI.createGenericVirtualRegister(NarrowSize);
unsigned CarryOut = MRI.createGenericVirtualRegister(1);
unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
unsigned CarryOut = MRI.createGenericVirtualRegister(LLT::scalar(1));

MIRBuilder.buildUAdde(NarrowTy, DstReg, CarryOut, Src1Regs[i],
MIRBuilder.buildUAdde(DstReg, CarryOut, Src1Regs[i],
Src2Regs[i], CarryIn);

DstTys.push_back(NarrowTy);
DstRegs.push_back(DstReg);
Indexes.push_back(i * NarrowSize);
CarryIn = CarryOut;
}
MIRBuilder.buildSequence(MI.getType(), MI.getOperand(0).getReg(), DstTys,
DstRegs, Indexes);
unsigned DstReg = MI.getOperand(0).getReg();
MIRBuilder.buildSequence(DstReg, DstRegs, Indexes);
MI.eraseFromParent();
return Legalized;
}
Expand All @@ -167,7 +164,7 @@ MachineLegalizeHelper::narrowScalar(MachineInstr &MI, unsigned TypeIdx,
MachineLegalizeHelper::LegalizeResult
MachineLegalizeHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx,
LLT WideTy) {
LLT Ty = MI.getType();
LLT Ty = MRI.getType(MI.getOperand(0).getReg());
unsigned WideSize = WideTy.getSizeInBits();
MIRBuilder.setInstr(MI);

Expand All @@ -183,16 +180,18 @@ MachineLegalizeHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx,
// Perform operation at larger width (any extension is fine here, high bits
// don't affect the result) and then truncate the result back to the
// original type.
unsigned Src1Ext = MRI.createGenericVirtualRegister(WideSize);
unsigned Src2Ext = MRI.createGenericVirtualRegister(WideSize);
MIRBuilder.buildAnyExt({WideTy, Ty}, Src1Ext, MI.getOperand(1).getReg());
MIRBuilder.buildAnyExt({WideTy, Ty}, Src2Ext, MI.getOperand(2).getReg());

unsigned DstExt = MRI.createGenericVirtualRegister(WideSize);
MIRBuilder.buildInstr(MI.getOpcode(), WideTy)
.addDef(DstExt).addUse(Src1Ext).addUse(Src2Ext);

MIRBuilder.buildTrunc({Ty, WideTy}, MI.getOperand(0).getReg(), DstExt);
unsigned Src1Ext = MRI.createGenericVirtualRegister(WideTy);
unsigned Src2Ext = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildAnyExt(Src1Ext, MI.getOperand(1).getReg());
MIRBuilder.buildAnyExt(Src2Ext, MI.getOperand(2).getReg());

unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildInstr(MI.getOpcode())
.addDef(DstExt)
.addUse(Src1Ext)
.addUse(Src2Ext);

MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
MI.eraseFromParent();
return Legalized;
}
Expand All @@ -202,89 +201,81 @@ MachineLegalizeHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx,
? TargetOpcode::G_SEXT
: TargetOpcode::G_ZEXT;

unsigned LHSExt = MRI.createGenericVirtualRegister(WideSize);
MIRBuilder.buildInstr(ExtOp, {WideTy, MI.getType()})
.addDef(LHSExt)
.addUse(MI.getOperand(1).getReg());
unsigned LHSExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildInstr(ExtOp).addDef(LHSExt).addUse(
MI.getOperand(1).getReg());

unsigned RHSExt = MRI.createGenericVirtualRegister(WideSize);
MIRBuilder.buildInstr(ExtOp, {WideTy, MI.getType()})
.addDef(RHSExt)
.addUse(MI.getOperand(2).getReg());
unsigned RHSExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildInstr(ExtOp).addDef(RHSExt).addUse(
MI.getOperand(2).getReg());

unsigned ResExt = MRI.createGenericVirtualRegister(WideSize);
MIRBuilder.buildInstr(MI.getOpcode(), WideTy)
unsigned ResExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildInstr(MI.getOpcode())
.addDef(ResExt)
.addUse(LHSExt)
.addUse(RHSExt);

MIRBuilder.buildTrunc({MI.getType(), WideTy}, MI.getOperand(0).getReg(),
ResExt);
MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), ResExt);
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_LOAD: {
assert(alignTo(Ty.getSizeInBits(), 8) == WideSize &&
"illegal to increase number of bytes loaded");

unsigned DstExt = MRI.createGenericVirtualRegister(WideSize);
MIRBuilder.buildLoad(WideTy, MI.getType(1), DstExt,
MI.getOperand(1).getReg(), **MI.memoperands_begin());
MIRBuilder.buildTrunc({Ty, WideTy}, MI.getOperand(0).getReg(), DstExt);
unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildLoad(DstExt, MI.getOperand(1).getReg(),
**MI.memoperands_begin());
MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_STORE: {
assert(alignTo(Ty.getSizeInBits(), 8) == WideSize &&
"illegal to increase number of bytes modified by a store");

unsigned SrcExt = MRI.createGenericVirtualRegister(WideSize);
MIRBuilder.buildAnyExt({WideTy, Ty}, SrcExt, MI.getOperand(0).getReg());
MIRBuilder.buildStore(WideTy, MI.getType(1), SrcExt,
MI.getOperand(1).getReg(), **MI.memoperands_begin());
unsigned SrcExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildAnyExt(SrcExt, MI.getOperand(0).getReg());
MIRBuilder.buildStore(SrcExt, MI.getOperand(1).getReg(),
**MI.memoperands_begin());
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_CONSTANT: {
unsigned DstExt = MRI.createGenericVirtualRegister(WideSize);
MIRBuilder.buildConstant(WideTy, DstExt, MI.getOperand(1).getImm());
MIRBuilder.buildTrunc({Ty, WideTy}, MI.getOperand(0).getReg(), DstExt);
unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildConstant(DstExt, MI.getOperand(1).getImm());
MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_FCONSTANT: {
unsigned DstExt = MRI.createGenericVirtualRegister(WideSize);
MIRBuilder.buildFConstant(WideTy, DstExt, *MI.getOperand(1).getFPImm());
MIRBuilder.buildFPTrunc({Ty, WideTy}, MI.getOperand(0).getReg(), DstExt);
unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildFConstant(DstExt, *MI.getOperand(1).getFPImm());
MIRBuilder.buildFPTrunc(MI.getOperand(0).getReg(), DstExt);
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_BRCOND: {
unsigned TstExt = MRI.createGenericVirtualRegister(WideSize);
MIRBuilder.buildAnyExt({WideTy, Ty}, TstExt, MI.getOperand(0).getReg());
MIRBuilder.buildBrCond(WideTy, TstExt, *MI.getOperand(1).getMBB());
unsigned TstExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildAnyExt(TstExt, MI.getOperand(0).getReg());
MIRBuilder.buildBrCond(TstExt, *MI.getOperand(1).getMBB());
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_ICMP: {
assert(TypeIdx == 1 && "unable to legalize predicate");
bool IsSigned = CmpInst::isSigned(
static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()));
unsigned Op0Ext = MRI.createGenericVirtualRegister(WideSize);
unsigned Op1Ext = MRI.createGenericVirtualRegister(WideSize);
unsigned Op0Ext = MRI.createGenericVirtualRegister(WideTy);
unsigned Op1Ext = MRI.createGenericVirtualRegister(WideTy);
if (IsSigned) {
MIRBuilder.buildSExt({WideTy, MI.getType(1)}, Op0Ext,
MI.getOperand(2).getReg());
MIRBuilder.buildSExt({WideTy, MI.getType(1)}, Op1Ext,
MI.getOperand(3).getReg());
MIRBuilder.buildSExt(Op0Ext, MI.getOperand(2).getReg());
MIRBuilder.buildSExt(Op1Ext, MI.getOperand(3).getReg());
} else {
MIRBuilder.buildZExt({WideTy, MI.getType(1)}, Op0Ext,
MI.getOperand(2).getReg());
MIRBuilder.buildZExt({WideTy, MI.getType(1)}, Op1Ext,
MI.getOperand(3).getReg());
MIRBuilder.buildZExt(Op0Ext, MI.getOperand(2).getReg());
MIRBuilder.buildZExt(Op1Ext, MI.getOperand(3).getReg());
}
MIRBuilder.buildICmp(
{MI.getType(0), WideTy},
static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()),
MI.getOperand(0).getReg(), Op0Ext, Op1Ext);
MI.eraseFromParent();
Expand All @@ -296,24 +287,23 @@ MachineLegalizeHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx,
MachineLegalizeHelper::LegalizeResult
MachineLegalizeHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
using namespace TargetOpcode;
unsigned Size = Ty.getSizeInBits();
MIRBuilder.setInstr(MI);

switch(MI.getOpcode()) {
default:
return UnableToLegalize;
case TargetOpcode::G_SREM:
case TargetOpcode::G_UREM: {
unsigned QuotReg = MRI.createGenericVirtualRegister(Size);
MIRBuilder.buildInstr(MI.getOpcode() == G_SREM ? G_SDIV : G_UDIV, Ty)
unsigned QuotReg = MRI.createGenericVirtualRegister(Ty);
MIRBuilder.buildInstr(MI.getOpcode() == G_SREM ? G_SDIV : G_UDIV)
.addDef(QuotReg)
.addUse(MI.getOperand(1).getReg())
.addUse(MI.getOperand(2).getReg());

unsigned ProdReg = MRI.createGenericVirtualRegister(Size);
MIRBuilder.buildMul(Ty, ProdReg, QuotReg, MI.getOperand(2).getReg());
MIRBuilder.buildSub(Ty, MI.getOperand(0).getReg(),
MI.getOperand(1).getReg(), ProdReg);
unsigned ProdReg = MRI.createGenericVirtualRegister(Ty);
MIRBuilder.buildMul(ProdReg, QuotReg, MI.getOperand(2).getReg());
MIRBuilder.buildSub(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
ProdReg);
MI.eraseFromParent();
return Legalized;
}
Expand All @@ -331,25 +321,23 @@ MachineLegalizeHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
return UnableToLegalize;
case TargetOpcode::G_ADD: {
unsigned NarrowSize = NarrowTy.getSizeInBits();
int NumParts = MI.getType().getSizeInBits() / NarrowSize;
unsigned DstReg = MI.getOperand(0).getReg();
int NumParts = MRI.getType(DstReg).getSizeInBits() / NarrowSize;

MIRBuilder.setInstr(MI);

SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs, Indexes;
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);

SmallVector<LLT, 2> DstTys;
for (int i = 0; i < NumParts; ++i) {
unsigned DstReg = MRI.createGenericVirtualRegister(NarrowSize);
MIRBuilder.buildAdd(NarrowTy, DstReg, Src1Regs[i], Src2Regs[i]);
DstTys.push_back(NarrowTy);
unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
MIRBuilder.buildAdd(DstReg, Src1Regs[i], Src2Regs[i]);
DstRegs.push_back(DstReg);
Indexes.push_back(i * NarrowSize);
}

MIRBuilder.buildSequence(MI.getType(), MI.getOperand(0).getReg(), DstTys,
DstRegs, Indexes);
MIRBuilder.buildSequence(DstReg, DstRegs, Indexes);
MI.eraseFromParent();
return Legalized;
}
Expand Down
10 changes: 7 additions & 3 deletions llvm/lib/CodeGen/GlobalISel/MachineLegalizePass.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -77,14 +77,18 @@ bool MachineLegalizePass::combineExtracts(MachineInstr &MI,
SeqI.getOperand(2 * SeqIdx + 2).getImm() < ExtractPos)
++SeqIdx;

if (SeqIdx == NumSeqSrcs ||
SeqI.getOperand(2 * SeqIdx + 2).getImm() != ExtractPos ||
SeqI.getType(SeqIdx + 1) != MI.getType(Idx)) {
if (SeqIdx == NumSeqSrcs) {
AllDefsReplaced = false;
continue;
}

unsigned OrigReg = SeqI.getOperand(2 * SeqIdx + 1).getReg();
if (SeqI.getOperand(2 * SeqIdx + 2).getImm() != ExtractPos ||
MRI.getType(OrigReg) != MRI.getType(ExtractReg)) {
AllDefsReplaced = false;
continue;
}

assert(!TargetRegisterInfo::isPhysicalRegister(OrigReg) &&
"unexpected physical register in G_SEQUENCE");

Expand Down
31 changes: 25 additions & 6 deletions llvm/lib/CodeGen/GlobalISel/MachineLegalizer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,10 @@
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/MachineLegalizer.h"

#include "llvm/ADT/SmallBitVector.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Target/TargetOpcodes.h"
Expand Down Expand Up @@ -116,17 +119,33 @@ MachineLegalizer::getAction(const InstrAspect &Aspect) const {
}

std::tuple<MachineLegalizer::LegalizeAction, unsigned, LLT>
MachineLegalizer::getAction(const MachineInstr &MI) const {
for (unsigned i = 0; i < MI.getNumTypes(); ++i) {
auto Action = getAction({MI.getOpcode(), i, MI.getType(i)});
MachineLegalizer::getAction(const MachineInstr &MI,
const MachineRegisterInfo &MRI) const {
SmallBitVector SeenTypes(8);
const MCOperandInfo *OpInfo = MI.getDesc().OpInfo;
for (unsigned i = 0; i < MI.getDesc().getNumOperands(); ++i) {
if (!OpInfo[i].isGenericType())
continue;

// We don't want to repeatedly check the same operand index, that
// could get expensive.
unsigned TypeIdx = OpInfo[i].getGenericTypeIndex();
if (SeenTypes[TypeIdx])
continue;

SeenTypes.set(TypeIdx);

LLT Ty = MRI.getType(MI.getOperand(i).getReg());
auto Action = getAction({MI.getOpcode(), TypeIdx, Ty});
if (Action.first != Legal)
return std::make_tuple(Action.first, i, Action.second);
return std::make_tuple(Action.first, TypeIdx, Action.second);
}
return std::make_tuple(Legal, 0, LLT{});
}

bool MachineLegalizer::isLegal(const MachineInstr &MI) const {
return std::get<0>(getAction(MI)) == Legal;
bool MachineLegalizer::isLegal(const MachineInstr &MI,
const MachineRegisterInfo &MRI) const {
return std::get<0>(getAction(MI, MRI)) == Legal;
}

LLT MachineLegalizer::findLegalType(const InstrAspect &Aspect,
Expand Down
3 changes: 2 additions & 1 deletion llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -575,10 +575,11 @@ bool RegBankSelect::runOnMachineFunction(MachineFunction &MF) {
// Legalized property, so it should be.
// FIXME: This should be in the MachineVerifier, but it can't use the
// MachineLegalizer as it's currently in the separate GlobalISel library.
const MachineRegisterInfo &MRI = MF.getRegInfo();
if (const MachineLegalizer *MLI = MF.getSubtarget().getMachineLegalizer()) {
for (const MachineBasicBlock &MBB : MF) {
for (const MachineInstr &MI : MBB) {
if (isPreISelGenericOpcode(MI.getOpcode()) && !MLI->isLegal(MI)) {
if (isPreISelGenericOpcode(MI.getOpcode()) && !MLI->isLegal(MI, MRI)) {
if (!TPC->isGlobalISelAbortEnabled()) {
MF.getProperties().set(
MachineFunctionProperties::Property::FailedISel);
Expand Down
5 changes: 3 additions & 2 deletions llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -367,7 +367,8 @@ unsigned RegisterBankInfo::getSizeInBits(unsigned Reg,
// get the size of that register class.
RC = TRI.getMinimalPhysRegClass(Reg);
} else {
unsigned RegSize = MRI.getSize(Reg);
LLT Ty = MRI.getType(Reg);
unsigned RegSize = Ty.isSized() ? Ty.getSizeInBits() : 0;
// If Reg is not a generic register, query the register class to
// get its size.
if (RegSize)
Expand Down Expand Up @@ -566,7 +567,7 @@ void RegisterBankInfo::OperandsMapper::createVRegs(unsigned OpIdx) {
for (unsigned &NewVReg : NewVRegsForOpIdx) {
assert(PartMap != PartMapList.end() && "Out-of-bound access");
assert(NewVReg == 0 && "Register has already been created");
NewVReg = MRI.createGenericVirtualRegister(PartMap->Length);
NewVReg = MRI.createGenericVirtualRegister(LLT::scalar(PartMap->Length));
MRI.setRegBank(NewVReg, *PartMap->RegBank);
++PartMap;
}
Expand Down
32 changes: 6 additions & 26 deletions llvm/lib/CodeGen/MIRParser/MIParser.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -599,25 +599,6 @@ bool MIParser::parse(MachineInstr *&MI) {
if (Token.isError() || parseInstruction(OpCode, Flags))
return true;

SmallVector<LLT, 1> Tys;
if (isPreISelGenericOpcode(OpCode)) {
// For generic opcode, at least one type is mandatory.
auto Loc = Token.location();
bool ManyTypes = Token.is(MIToken::lbrace);
if (ManyTypes)
lex();

// Now actually parse the type(s).
do {
Tys.resize(Tys.size() + 1);
if (parseLowLevelType(Loc, Tys[Tys.size() - 1]))
return true;
} while (ManyTypes && consumeIfPresent(MIToken::comma));

if (ManyTypes)
expectAndConsume(MIToken::rbrace);
}

// Parse the remaining machine operands.
while (!Token.isNewlineOrEOF() && Token.isNot(MIToken::kw_debug_location) &&
Token.isNot(MIToken::coloncolon) && Token.isNot(MIToken::lbrace)) {
Expand Down Expand Up @@ -673,10 +654,6 @@ bool MIParser::parse(MachineInstr *&MI) {
// TODO: Check for extraneous machine operands.
MI = MF.CreateMachineInstr(MCID, DebugLocation, /*NoImplicit=*/true);
MI->setFlags(Flags);
if (Tys.size() > 0) {
for (unsigned i = 0; i < Tys.size(); ++i)
MI->setType(Tys[i], i);
}
for (const auto &Operand : Operands)
MI->addOperand(MF, Operand.Operand);
if (assignRegisterTies(*MI, Operands))
Expand Down Expand Up @@ -996,11 +973,14 @@ bool MIParser::parseRegisterOperand(MachineOperand &Dest,
if (MRI.getRegClassOrRegBank(Reg).is<const TargetRegisterClass *>())
return error("unexpected size on non-generic virtual register");

unsigned Size;
if (parseSize(Size))
LLT Ty;
if (parseLowLevelType(Token.location(), Ty))
return true;

if (expectAndConsume(MIToken::rparen))
return true;

MRI.setSize(Reg, Size);
MRI.setType(Reg, Ty);
} else if (PFS.GenericVRegs.count(Reg)) {
// Generic virtual registers must have a size.
// If we end up here this means the size hasn't been specified and
Expand Down
4 changes: 2 additions & 2 deletions llvm/lib/CodeGen/MIRParser/MIRParser.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -415,7 +415,7 @@ bool MIRParserImpl::initializeRegisterInfo(PerFunctionMIParsingState &PFS,
if (StringRef(VReg.Class.Value).equals("_")) {
// This is a generic virtual register.
// The size will be set appropriately when we reach the definition.
Reg = RegInfo.createGenericVirtualRegister(/*Size*/ 1);
Reg = RegInfo.createGenericVirtualRegister(LLT::scalar(1));
PFS.GenericVRegs.insert(Reg);
} else {
const auto *RC = getRegClass(MF, VReg.Class.Value);
Expand All @@ -428,7 +428,7 @@ bool MIRParserImpl::initializeRegisterInfo(PerFunctionMIParsingState &PFS,
VReg.Class.SourceRange.Start,
Twine("use of undefined register class or register bank '") +
VReg.Class.Value + "'");
Reg = RegInfo.createGenericVirtualRegister(/*Size*/ 1);
Reg = RegInfo.createGenericVirtualRegister(LLT::scalar(1));
RegInfo.setRegBank(Reg, *RegBank);
PFS.GenericVRegs.insert(Reg);
}
Expand Down
18 changes: 4 additions & 14 deletions llvm/lib/CodeGen/MIRPrinter.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -223,7 +223,8 @@ void MIRPrinter::convert(yaml::MachineFunction &MF,
VReg.Class = StringRef(RegInfo.getRegBankOrNull(Reg)->getName()).lower();
else {
VReg.Class = std::string("_");
assert(RegInfo.getSize(Reg) && "Generic registers must have a size");
assert(RegInfo.getType(Reg).isValid() &&
"Generic registers must have a valid type");
}
unsigned PreferredReg = RegInfo.getSimpleHint(Reg);
if (PreferredReg)
Expand Down Expand Up @@ -568,17 +569,6 @@ void MIPrinter::print(const MachineInstr &MI) {
if (MI.getFlag(MachineInstr::FrameSetup))
OS << "frame-setup ";
OS << TII->getName(MI.getOpcode());
if (isPreISelGenericOpcode(MI.getOpcode())) {
assert(MI.getType().isValid() && "Generic instructions must have a type");
unsigned NumTypes = MI.getNumTypes();
OS << (NumTypes > 1 ? " {" : "") << ' ';
for (unsigned i = 0; i < NumTypes; ++i) {
MI.getType(i).print(OS);
if (i + 1 != NumTypes)
OS << ", ";
}
OS << (NumTypes > 1 ? " }" : "") << ' ';
}
if (I < E)
OS << ' ';

Expand Down Expand Up @@ -787,8 +777,8 @@ void MIPrinter::print(const MachineOperand &Op, const TargetRegisterInfo *TRI,
if (ShouldPrintRegisterTies && Op.isTied() && !Op.isDef())
OS << "(tied-def " << Op.getParent()->findTiedOperandIdx(I) << ")";
assert((!IsDef || MRI) && "for IsDef, MRI must be provided");
if (IsDef && MRI->getSize(Op.getReg()))
OS << '(' << MRI->getSize(Op.getReg()) << ')';
if (IsDef && MRI->getType(Op.getReg()).isValid())
OS << '(' << MRI->getType(Op.getReg()) << ')';
break;
case MachineOperand::MO_Immediate:
OS << Op.getImm();
Expand Down
61 changes: 5 additions & 56 deletions llvm/lib/CodeGen/MachineInstr.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -680,12 +680,7 @@ MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
DebugLoc dl, bool NoImp)
: MCID(&tid), Parent(nullptr), Operands(nullptr), NumOperands(0), Flags(0),
AsmPrinterFlags(0), NumMemRefs(0), MemRefs(nullptr),
debugLoc(std::move(dl))
#ifdef LLVM_BUILD_GLOBAL_ISEL
,
Tys(0)
#endif
{
debugLoc(std::move(dl)) {
assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");

// Reserve space for the expected number of operands.
Expand All @@ -704,12 +699,7 @@ MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
: MCID(&MI.getDesc()), Parent(nullptr), Operands(nullptr), NumOperands(0),
Flags(0), AsmPrinterFlags(0), NumMemRefs(MI.NumMemRefs),
MemRefs(MI.MemRefs), debugLoc(MI.getDebugLoc())
#ifdef LLVM_BUILD_GLOBAL_ISEL
,
Tys(0)
#endif
{
MemRefs(MI.MemRefs), debugLoc(MI.getDebugLoc()) {
assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");

CapOperands = OperandCapacity::get(MI.getNumOperands());
Expand All @@ -732,37 +722,6 @@ MachineRegisterInfo *MachineInstr::getRegInfo() {
return nullptr;
}

// Implement dummy setter and getter for type when
// global-isel is not built.
// The proper implementation is WIP and is tracked here:
// PR26576.
#ifndef LLVM_BUILD_GLOBAL_ISEL
unsigned MachineInstr::getNumTypes() const { return 0; }

void MachineInstr::setType(LLT Ty, unsigned Idx) {}

LLT MachineInstr::getType(unsigned Idx) const { return LLT{}; }

void MachineInstr::removeTypes() {}

#else
unsigned MachineInstr::getNumTypes() const { return Tys.size(); }

void MachineInstr::setType(LLT Ty, unsigned Idx) {
assert((!Ty.isValid() || isPreISelGenericOpcode(getOpcode())) &&
"Non generic instructions are not supposed to be typed");
if (Tys.size() < Idx + 1)
Tys.resize(Idx+1);
Tys[Idx] = Ty;
}

LLT MachineInstr::getType(unsigned Idx) const { return Tys[Idx]; }

void MachineInstr::removeTypes() {
Tys.clear();
}
#endif // LLVM_BUILD_GLOBAL_ISEL

/// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
/// this instruction from their respective use lists. This requires that the
/// operands already be on their use lists.
Expand Down Expand Up @@ -1751,9 +1710,9 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
unsigned Reg = getOperand(StartOp).getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
VirtRegs.push_back(Reg);
unsigned Size;
if (MRI && (Size = MRI->getSize(Reg)))
OS << '(' << Size << ')';
LLT Ty = MRI ? MRI->getType(Reg) : LLT{};
if (Ty.isValid())
OS << '(' << Ty << ')';
}
}

Expand All @@ -1766,16 +1725,6 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
else
OS << "UNKNOWN";

if (getNumTypes() > 0) {
OS << " { ";
for (unsigned i = 0; i < getNumTypes(); ++i) {
getType(i).print(OS);
if (i + 1 != getNumTypes())
OS << ", ";
}
OS << " } ";
}

if (SkipOpers)
return;

Expand Down
28 changes: 14 additions & 14 deletions llvm/lib/CodeGen/MachineRegisterInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -112,47 +112,47 @@ MachineRegisterInfo::createVirtualRegister(const TargetRegisterClass *RegClass){
return Reg;
}

unsigned
MachineRegisterInfo::getSize(unsigned VReg) const {
VRegToSizeMap::const_iterator SizeIt = getVRegToSize().find(VReg);
return SizeIt != getVRegToSize().end() ? SizeIt->second : 0;
LLT MachineRegisterInfo::getType(unsigned VReg) const {
VRegToTypeMap::const_iterator TypeIt = getVRegToType().find(VReg);
return TypeIt != getVRegToType().end() ? TypeIt->second : LLT{};
}

void MachineRegisterInfo::setSize(unsigned VReg, unsigned Size) {
void MachineRegisterInfo::setType(unsigned VReg, LLT Ty) {
// Check that VReg doesn't have a class.
assert(!getRegClassOrRegBank(VReg).is<const TargetRegisterClass *>() &&
"Can't set the size of a non-generic virtual register");
getVRegToSize()[VReg] = Size;
getVRegToType()[VReg] = Ty;
}

unsigned
MachineRegisterInfo::createGenericVirtualRegister(unsigned Size) {
assert(Size && "Cannot create empty virtual register");
MachineRegisterInfo::createGenericVirtualRegister(LLT Ty) {
assert(Ty.isValid() && "Cannot create empty virtual register");

// New virtual register number.
unsigned Reg = TargetRegisterInfo::index2VirtReg(getNumVirtRegs());
VRegInfo.grow(Reg);
// FIXME: Should we use a dummy register bank?
VRegInfo[Reg].first = static_cast<RegisterBank *>(nullptr);
getVRegToSize()[Reg] = Size;
getVRegToType()[Reg] = Ty;
RegAllocHints.grow(Reg);
if (TheDelegate)
TheDelegate->MRI_NoteNewVirtualRegister(Reg);
return Reg;
}

void MachineRegisterInfo::clearVirtRegSizes() {
void MachineRegisterInfo::clearVirtRegTypes() {
#ifndef NDEBUG
// Verify that the size of the now-constrained vreg is unchanged.
for (auto &VRegToSize : getVRegToSize()) {
auto *RC = getRegClass(VRegToSize.first);
if (VRegToSize.second != (RC->getSize() * 8))
for (auto &VRegToType : getVRegToType()) {
auto *RC = getRegClass(VRegToType.first);
if (VRegToType.second.isSized() &&
VRegToType.second.getSizeInBits() > (RC->getSize() * 8))
llvm_unreachable(
"Virtual register has explicit size different from its class size");
}
#endif

getVRegToSize().clear();
getVRegToType().clear();
}

/// clearVirtRegs - Remove all virtual registers (after physreg assignment).
Expand Down
36 changes: 24 additions & 12 deletions llvm/lib/CodeGen/MachineVerifier.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -887,16 +887,24 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
}

// Check types.
const unsigned NumTypes = MI->getNumTypes();
if (isPreISelGenericOpcode(MCID.getOpcode())) {
if (isFunctionSelected)
report("Unexpected generic instruction in a Selected function", MI);

if (NumTypes == 0)
report("Generic instruction must have a type", MI);
} else {
if (NumTypes != 0)
report("Non-generic instruction cannot have a type", MI);
// Generic instructions specify equality constraints between some
// of their operands. Make sure these are consistent.
SmallVector<LLT, 4> Types;
for (unsigned i = 0; i < MCID.getNumOperands(); ++i) {
if (!MCID.OpInfo[i].isGenericType())
continue;
size_t TypeIdx = MCID.OpInfo[i].getGenericTypeIndex();
Types.resize(std::max(TypeIdx + 1, Types.size()));

LLT OpTy = MRI->getType(MI->getOperand(i).getReg());
if (Types[TypeIdx].isValid() && Types[TypeIdx] != OpTy)
report("type mismatch in generic instruction", MI);
Types[TypeIdx] = OpTy;
}
}

// Generic opcodes must not have physical register operands.
Expand Down Expand Up @@ -1026,9 +1034,10 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
}

// The gvreg must have a size and it must not have a SubIdx.
unsigned Size = MRI->getSize(Reg);
if (!Size) {
report("Generic virtual register must have a size", MO, MONum);
LLT Ty = MRI->getType(Reg);
if (!Ty.isValid()) {
report("Generic virtual register must have a valid type", MO,
MONum);
return;
}

Expand All @@ -1043,15 +1052,18 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
}

// Make sure the register fits into its register bank if any.
if (RegBank && RegBank->getSize() < Size) {
if (RegBank && Ty.isSized() &&
RegBank->getSize() < Ty.getSizeInBits()) {
report("Register bank is too small for virtual register", MO,
MONum);
errs() << "Register bank " << RegBank->getName() << " too small("
<< RegBank->getSize() << ") to fit " << Size << "-bits\n";
<< RegBank->getSize() << ") to fit " << Ty.getSizeInBits()
<< "-bits\n";
return;
}
if (SubIdx) {
report("Generic virtual register does not subregister index", MO, MONum);
report("Generic virtual register does not subregister index", MO,
MONum);
return;
}
break;
Expand Down
4 changes: 2 additions & 2 deletions llvm/lib/Target/AArch64/AArch64CallLowering.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,7 @@ bool AArch64CallLowering::lowerFormalArguments(
[](MachineIRBuilder &MIRBuilder, Type *Ty,
unsigned ValReg, unsigned PhysReg) {
MIRBuilder.getMBB().addLiveIn(PhysReg);
MIRBuilder.buildType(LLT{*Ty}, ValReg, PhysReg);
MIRBuilder.buildCopy(ValReg, PhysReg);
});
}

Expand Down Expand Up @@ -172,7 +172,7 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
handleAssignments(MIRBuilder, RetAssignFn, ResTys, ResRegs,
[&](MachineIRBuilder &MIRBuilder, Type *Ty,
unsigned ValReg, unsigned PhysReg) {
MIRBuilder.buildType(LLT{*Ty}, ValReg, PhysReg);
MIRBuilder.buildCopy(ValReg, PhysReg);
MIB.addDef(PhysReg, RegState::Implicit);
});

Expand Down
25 changes: 10 additions & 15 deletions llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,8 @@ static bool unsupportedBinOp(const MachineInstr &I,
const AArch64RegisterBankInfo &RBI,
const MachineRegisterInfo &MRI,
const AArch64RegisterInfo &TRI) {
if (!I.getType().isSized()) {
LLT Ty = MRI.getType(I.getOperand(0).getReg());
if (!Ty.isSized()) {
DEBUG(dbgs() << "Generic binop should be sized\n");
return true;
}
Expand Down Expand Up @@ -219,38 +220,35 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
return false;
}

const LLT Ty = I.getType();
const LLT Ty = I.getOperand(0).isReg() ? MRI.getType(I.getOperand(0).getReg())
: LLT::unsized();
assert(Ty.isValid() && "Generic instruction doesn't have a type");

switch (I.getOpcode()) {
case TargetOpcode::G_BR: {
I.setDesc(TII.get(AArch64::B));
I.removeTypes();
return true;
}

case TargetOpcode::G_TYPE: {
I.setDesc(TII.get(TargetOpcode::COPY));
I.removeTypes();
return true;
}

case TargetOpcode::G_PHI: {
I.setDesc(TII.get(TargetOpcode::PHI));
I.removeTypes();
return true;
}

case TargetOpcode::G_FRAME_INDEX: {
// allocas and G_FRAME_INDEX are only supported in addrspace(0).
if (I.getType() != LLT::pointer(0)) {
DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << I.getType()
if (Ty != LLT::pointer(0)) {
DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
<< ", expected: " << LLT::pointer(0) << '\n');
return false;
}

I.setDesc(TII.get(AArch64::ADDXri));
I.removeTypes();

// MOs for a #0 shifted immediate.
I.addOperand(MachineOperand::CreateImm(0));
Expand All @@ -260,8 +258,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
}
case TargetOpcode::G_LOAD:
case TargetOpcode::G_STORE: {
LLT MemTy = I.getType(0);
LLT PtrTy = I.getType(1);
LLT MemTy = Ty;
LLT PtrTy = MRI.getType(I.getOperand(1).getReg());

if (PtrTy != LLT::pointer(0)) {
DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
Expand All @@ -275,8 +273,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
assert(PtrRB.getID() == AArch64::GPRRegBankID &&
"Load/Store pointer operand isn't a GPR");
assert(MRI.getSize(PtrReg) == 64 &&
"Load/Store pointer operand isn't 64-bit");
assert(MRI.getType(PtrReg).isPointer() &&
"Load/Store pointer operand isn't a pointer");
#endif

const unsigned ValReg = I.getOperand(0).getReg();
Expand All @@ -288,7 +286,6 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
return false;

I.setDesc(TII.get(NewOpc));
I.removeTypes();

I.addOperand(MachineOperand::CreateImm(0));
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
Expand Down Expand Up @@ -322,7 +319,6 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
}

I.setDesc(TII.get(NewOpc));
I.removeTypes();

I.addOperand(MachineOperand::CreateReg(ZeroReg, /*isDef=*/false));

Expand Down Expand Up @@ -361,7 +357,6 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {

I.setDesc(TII.get(NewOpc));
// FIXME: Should the type be always reset in setDesc?
I.removeTypes();

// Now that we selected an opcode, we need to constrain the register
// operands to use appropriate classes.
Expand Down
5 changes: 4 additions & 1 deletion llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@

#include "AArch64RegisterBankInfo.h"
#include "AArch64InstrInfo.h" // For XXXRegClassID.
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
Expand Down Expand Up @@ -177,7 +179,8 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
// As a top-level guess, vectors go in FPRs, scalars in GPRs. Obviously this
// won't work for normal floating-point types (or NZCV). When such
// instructions exist we'll need to look at the MI's opcode.
LLT Ty = MI.getType();
auto &MRI = MI.getParent()->getParent()->getRegInfo();
LLT Ty = MRI.getType(MI.getOperand(0).getReg());
unsigned BankID;
if (Ty.isVector())
BankID = AArch64::FPRRegBankID;
Expand Down
48 changes: 24 additions & 24 deletions llvm/test/CodeGen/AArch64/GlobalISel/arm64-callingconv.ll
Original file line number Diff line number Diff line change
Expand Up @@ -4,14 +4,14 @@ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-linux-gnu"

; CHECK-LABEL: name: args_i32
; CHECK: %[[ARG0:[0-9]+]](32) = G_TYPE s32 %w0
; CHECK: %{{[0-9]+}}(32) = G_TYPE s32 %w1
; CHECK: %{{[0-9]+}}(32) = G_TYPE s32 %w2
; CHECK: %{{[0-9]+}}(32) = G_TYPE s32 %w3
; CHECK: %{{[0-9]+}}(32) = G_TYPE s32 %w4
; CHECK: %{{[0-9]+}}(32) = G_TYPE s32 %w5
; CHECK: %{{[0-9]+}}(32) = G_TYPE s32 %w6
; CHECK: %{{[0-9]+}}(32) = G_TYPE s32 %w7
; CHECK: %[[ARG0:[0-9]+]](s32) = COPY %w0
; CHECK: %{{[0-9]+}}(s32) = COPY %w1
; CHECK: %{{[0-9]+}}(s32) = COPY %w2
; CHECK: %{{[0-9]+}}(s32) = COPY %w3
; CHECK: %{{[0-9]+}}(s32) = COPY %w4
; CHECK: %{{[0-9]+}}(s32) = COPY %w5
; CHECK: %{{[0-9]+}}(s32) = COPY %w6
; CHECK: %{{[0-9]+}}(s32) = COPY %w7
; CHECK: %w0 = COPY %[[ARG0]]

define i32 @args_i32(i32 %w0, i32 %w1, i32 %w2, i32 %w3,
Expand All @@ -20,14 +20,14 @@ define i32 @args_i32(i32 %w0, i32 %w1, i32 %w2, i32 %w3,
}

; CHECK-LABEL: name: args_i64
; CHECK: %[[ARG0:[0-9]+]](64) = G_TYPE s64 %x0
; CHECK: %{{[0-9]+}}(64) = G_TYPE s64 %x1
; CHECK: %{{[0-9]+}}(64) = G_TYPE s64 %x2
; CHECK: %{{[0-9]+}}(64) = G_TYPE s64 %x3
; CHECK: %{{[0-9]+}}(64) = G_TYPE s64 %x4
; CHECK: %{{[0-9]+}}(64) = G_TYPE s64 %x5
; CHECK: %{{[0-9]+}}(64) = G_TYPE s64 %x6
; CHECK: %{{[0-9]+}}(64) = G_TYPE s64 %x7
; CHECK: %[[ARG0:[0-9]+]](s64) = COPY %x0
; CHECK: %{{[0-9]+}}(s64) = COPY %x1
; CHECK: %{{[0-9]+}}(s64) = COPY %x2
; CHECK: %{{[0-9]+}}(s64) = COPY %x3
; CHECK: %{{[0-9]+}}(s64) = COPY %x4
; CHECK: %{{[0-9]+}}(s64) = COPY %x5
; CHECK: %{{[0-9]+}}(s64) = COPY %x6
; CHECK: %{{[0-9]+}}(s64) = COPY %x7
; CHECK: %x0 = COPY %[[ARG0]]
define i64 @args_i64(i64 %x0, i64 %x1, i64 %x2, i64 %x3,
i64 %x4, i64 %x5, i64 %x6, i64 %x7) {
Expand All @@ -36,14 +36,14 @@ define i64 @args_i64(i64 %x0, i64 %x1, i64 %x2, i64 %x3,


; CHECK-LABEL: name: args_ptrs
; CHECK: %[[ARG0:[0-9]+]](64) = G_TYPE p0 %x0
; CHECK: %{{[0-9]+}}(64) = G_TYPE p0 %x1
; CHECK: %{{[0-9]+}}(64) = G_TYPE p0 %x2
; CHECK: %{{[0-9]+}}(64) = G_TYPE p0 %x3
; CHECK: %{{[0-9]+}}(64) = G_TYPE p0 %x4
; CHECK: %{{[0-9]+}}(64) = G_TYPE p0 %x5
; CHECK: %{{[0-9]+}}(64) = G_TYPE p0 %x6
; CHECK: %{{[0-9]+}}(64) = G_TYPE p0 %x7
; CHECK: %[[ARG0:[0-9]+]](p0) = COPY %x0
; CHECK: %{{[0-9]+}}(p0) = COPY %x1
; CHECK: %{{[0-9]+}}(p0) = COPY %x2
; CHECK: %{{[0-9]+}}(p0) = COPY %x3
; CHECK: %{{[0-9]+}}(p0) = COPY %x4
; CHECK: %{{[0-9]+}}(p0) = COPY %x5
; CHECK: %{{[0-9]+}}(p0) = COPY %x6
; CHECK: %{{[0-9]+}}(p0) = COPY %x7
; CHECK: %x0 = COPY %[[ARG0]]
define i8* @args_ptrs(i8* %x0, i16* %x1, <2 x i8>* %x2, {i8, i16, i32}* %x3,
[3 x float]* %x4, double* %x5, i8* %x6, i8* %x7) {
Expand Down
204 changes: 102 additions & 102 deletions llvm/test/CodeGen/AArch64/GlobalISel/arm64-instructionselect.mir

Large diffs are not rendered by default.

450 changes: 225 additions & 225 deletions llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll

Large diffs are not rendered by default.

124 changes: 63 additions & 61 deletions llvm/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir
Original file line number Diff line number Diff line change
Expand Up @@ -74,9 +74,9 @@ registers:
body: |
bb.0.entry:
liveins: %x0
; CHECK: %1(32) = G_ADD s32 %0
%0(32) = G_TYPE s32 %w0
%1(32) = G_ADD s32 %0, %0
; CHECK: %1(s32) = G_ADD %0
%0(s32) = COPY %w0
%1(s32) = G_ADD %0, %0
...

---
Expand All @@ -95,10 +95,10 @@ registers:
body: |
bb.0.entry:
liveins: %d0
; CHECK: %0(64) = G_TYPE s64 %d0
; CHECK: %1(64) = G_ADD <2 x s32> %0
%0(64) = G_TYPE s64 %d0
%1(64) = G_ADD <2 x s32> %0, %0
; CHECK: %0(<2 x s32>) = COPY %d0
; CHECK: %1(<2 x s32>) = G_ADD %0
%0(<2 x s32>) = COPY %d0
%1(<2 x s32>) = G_ADD %0, %0
...

---
Expand All @@ -120,13 +120,13 @@ registers:
body: |
bb.0.entry:
liveins: %s0, %x0
; CHECK: %0(32) = G_TYPE s32 %s0
; CHECK-NEXT: %1(32) = G_TYPE s32 %w0
; CHECK-NEXT: %3(32) = COPY %0
; CHECK-NEXT: %2(32) = G_ADD s32 %3, %1
%0(32) = G_TYPE s32 %s0
%1(32) = G_TYPE s32 %w0
%2(32) = G_ADD s32 %0, %1
; CHECK: %0(s32) = COPY %s0
; CHECK-NEXT: %1(s32) = COPY %w0
; CHECK-NEXT: %3(s32) = COPY %0
; CHECK-NEXT: %2(s32) = G_ADD %3, %1
%0(s32) = COPY %s0
%1(s32) = COPY %w0
%2(s32) = G_ADD %0, %1
...

# Check that we repair the assignment for %0 differently for both uses.
Expand All @@ -144,12 +144,12 @@ registers:
body: |
bb.0.entry:
liveins: %s0, %x0
; CHECK: %0(32) = G_TYPE s32 %s0
; CHECK-NEXT: %2(32) = COPY %0
; CHECK-NEXT: %3(32) = COPY %0
; CHECK-NEXT: %1(32) = G_ADD s32 %2, %3
%0(32) = G_TYPE s32 %s0
%1(32) = G_ADD s32 %0, %0
; CHECK: %0(s32) = COPY %s0
; CHECK-NEXT: %2(s32) = COPY %0
; CHECK-NEXT: %3(s32) = COPY %0
; CHECK-NEXT: %1(s32) = G_ADD %2, %3
%0(s32) = COPY %s0
%1(s32) = G_ADD %0, %0
...

---
Expand All @@ -170,11 +170,11 @@ registers:
body: |
bb.0.entry:
liveins: %w0
; CHECK: %0(32) = G_TYPE s32 %w0
; CHECK-NEXT: %2(32) = G_ADD s32 %0, %0
; CHECK-NEXT: %1(32) = COPY %2
%0(32) = G_TYPE s32 %w0
%1(32) = G_ADD s32 %0, %0
; CHECK: %0(s32) = COPY %w0
; CHECK-NEXT: %2(s32) = G_ADD %0, %0
; CHECK-NEXT: %1(s32) = COPY %2
%0(s32) = COPY %w0
%1(s32) = G_ADD %0, %0
...

---
Expand All @@ -194,22 +194,24 @@ registers:
- { id: 2, class: gpr32 }
- { id: 3, class: _ }
- { id: 4, class: _ }
- { id: 5, class: _ }
body: |
bb.0.entry:
successors: %bb.2.end, %bb.1.then
liveins: %x0, %x1, %w2
%0 = LDRWui killed %x0, 0 :: (load 4 from %ir.src)
%1 = G_TYPE s64 %x1
%2 = G_TYPE s32 %w2
%5(s32) = COPY %0
%1 = COPY %x1
%2 = COPY %w2
TBNZW killed %2, 0, %bb.2.end
bb.1.then:
successors: %bb.2.end
%3(32) = G_ADD s32 %0, %0
%3(s32) = G_ADD %5, %5
bb.2.end:
%4(32) = PHI %0, %bb.0.entry, %3, %bb.1.then
%4(s32) = PHI %0, %bb.0.entry, %3, %bb.1.then
STRWui killed %4, killed %1, 0 :: (store 4 into %ir.dst)
RET_ReallyLR
...
Expand All @@ -231,13 +233,13 @@ registers:
body: |
bb.0.entry:
liveins: %w0, %s0
; CHECK: %0(32) = G_TYPE s32 %w0
; CHECK-NEXT: %1(32) = G_TYPE s32 %s0
; CHECK-NEXT: %3(32) = COPY %1
; CHECK-NEXT: %2(32) = G_ADD s32 %0, %3
%0(32) = G_TYPE s32 %w0
%1(32) = G_TYPE s32 %s0
%2(32) = G_ADD s32 %0, %1
; CHECK: %0(s32) = COPY %w0
; CHECK-NEXT: %1(s32) = COPY %s0
; CHECK-NEXT: %3(s32) = COPY %1
; CHECK-NEXT: %2(s32) = G_ADD %0, %3
%0(s32) = COPY %w0
%1(s32) = COPY %s0
%2(s32) = G_ADD %0, %1
...

---
Expand All @@ -254,11 +256,11 @@ registers:
body: |
bb.0.entry:
liveins: %w0
; CHECK: %0(32) = G_TYPE s32 %w0
; CHECK-NEXT: %1(32) = G_ADD s32 %0, %0
; CHECK: %0(s32) = COPY %w0
; CHECK-NEXT: %1(s32) = G_ADD %0, %0
; CHECK-NEXT: %s0 = COPY %1
%0(32) = G_TYPE s32 %w0
%1(32) = G_ADD s32 %0, %0
%0(s32) = COPY %w0
%1(s32) = G_ADD %0, %0
%s0 = COPY %1
...

Expand Down Expand Up @@ -289,23 +291,23 @@ registers:
body: |
bb.0.entry:
liveins: %x0, %x1
; CHECK: %0(64) = G_TYPE s64 %x0
; CHECK-NEXT: %1(64) = G_TYPE s64 %x1
; CHECK: %0(<2 x s32>) = COPY %x0
; CHECK-NEXT: %1(<2 x s32>) = COPY %x1
; Fast mode tries to reuse the source of the copy for the destination.
; Now, the default mapping says that %0 and %1 need to be in FPR.
; The repairing code insert two copies to materialize that.
; FAST-NEXT: %3(64) = COPY %0
; FAST-NEXT: %4(64) = COPY %1
; FAST-NEXT: %3(s64) = COPY %0
; FAST-NEXT: %4(s64) = COPY %1
; The mapping of G_OR is on FPR.
; FAST-NEXT: %2(64) = G_OR <2 x s32> %3, %4
; FAST-NEXT: %2(<2 x s32>) = G_OR %3, %4
; Greedy mode remapped the instruction on the GPR bank.
; GREEDY-NEXT: %2(64) = G_OR <2 x s32> %0, %1
%0(64) = G_TYPE s64 %x0
%1(64) = G_TYPE s64 %x1
%2(64) = G_OR <2 x s32> %0, %1
; GREEDY-NEXT: %2(<2 x s32>) = G_OR %0, %1
%0(<2 x s32>) = COPY %x0
%1(<2 x s32>) = COPY %x1
%2(<2 x s32>) = G_OR %0, %1
...

---
Expand Down Expand Up @@ -336,25 +338,25 @@ registers:
body: |
bb.0.entry:
liveins: %x0, %x1
; CHECK: %0(64) = G_TYPE s64 %x0
; CHECK-NEXT: %1(64) = G_TYPE s64 %x1
; CHECK: %0(<2 x s32>) = COPY %x0
; CHECK-NEXT: %1(<2 x s32>) = COPY %x1
; Fast mode tries to reuse the source of the copy for the destination.
; Now, the default mapping says that %0 and %1 need to be in FPR.
; The repairing code insert two copies to materialize that.
; FAST-NEXT: %3(64) = COPY %0
; FAST-NEXT: %4(64) = COPY %1
; FAST-NEXT: %3(s64) = COPY %0
; FAST-NEXT: %4(s64) = COPY %1
; The mapping of G_OR is on FPR.
; FAST-NEXT: %2(64) = G_OR <2 x s32> %3, %4
; FAST-NEXT: %2(<2 x s32>) = G_OR %3, %4
; Greedy mode remapped the instruction on the GPR bank.
; GREEDY-NEXT: %3(64) = G_OR <2 x s32> %0, %1
; GREEDY-NEXT: %3(s64) = G_OR %0, %1
; We need to keep %2 into FPR because we do not know anything about it.
; GREEDY-NEXT: %2(64) = COPY %3
%0(64) = G_TYPE s64 %x0
%1(64) = G_TYPE s64 %x1
%2(64) = G_OR <2 x s32> %0, %1
; GREEDY-NEXT: %2(<2 x s32>) = COPY %3
%0(<2 x s32>) = COPY %x0
%1(<2 x s32>) = COPY %x1
%2(<2 x s32>) = G_OR %0, %1
...

---
Expand All @@ -371,12 +373,12 @@ body: |
bb.0:
liveins: %x0
; CHECK: %0 = G_TYPE s64 %x0
; CHECK: %0 = COPY %x0
; CHECK-NEXT: %1 = ADDXrr %0, %0
; CHECK-NEXT: %x0 = COPY %1
; CHECK-NEXT: RET_ReallyLR implicit %x0
%0 = G_TYPE s64 %x0
%0 = COPY %x0
%1 = ADDXrr %0, %0
%x0 = COPY %1
RET_ReallyLR implicit %x0
Expand Down
10 changes: 5 additions & 5 deletions llvm/test/CodeGen/AArch64/GlobalISel/call-translator.ll
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ define void @test_trivial_call() {

; CHECK-LABEL: name: test_simple_return
; CHECK: BL @simple_return_callee, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit-def %x0
; CHECK: [[RES:%[0-9]+]](64) = G_TYPE s64 %x0
; CHECK: [[RES:%[0-9]+]](s64) = COPY %x0
; CHECK: %x0 = COPY [[RES]]
; CHECK: RET_ReallyLR implicit %x0
declare i64 @simple_return_callee()
Expand All @@ -20,7 +20,7 @@ define i64 @test_simple_return() {
}

; CHECK-LABEL: name: test_simple_arg
; CHECK: [[IN:%[0-9]+]](32) = G_TYPE s32 %w0
; CHECK: [[IN:%[0-9]+]](s32) = COPY %w0
; CHECK: %w0 = COPY [[IN]]
; CHECK: BL @simple_arg_callee, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %w0
; CHECK: RET_ReallyLR
Expand All @@ -31,7 +31,7 @@ define void @test_simple_arg(i32 %in) {
}

; CHECK-LABEL: name: test_indirect_call
; CHECK: [[FUNC:%[0-9]+]](64) = G_TYPE p0 %x0
; CHECK: [[FUNC:%[0-9]+]](p0) = COPY %x0
; CHECK: BLR [[FUNC]], csr_aarch64_aapcs, implicit-def %lr, implicit %sp
; CHECK: RET_ReallyLR
define void @test_indirect_call(void()* %func) {
Expand All @@ -40,8 +40,8 @@ define void @test_indirect_call(void()* %func) {
}

; CHECK-LABEL: name: test_multiple_args
; CHECK: [[IN:%[0-9]+]](64) = G_TYPE s64 %x0
; CHECK: [[ANSWER:%[0-9]+]](32) = G_CONSTANT s32 42
; CHECK: [[IN:%[0-9]+]](s64) = COPY %x0
; CHECK: [[ANSWER:%[0-9]+]](s32) = G_CONSTANT 42
; CHECK: %w0 = COPY [[ANSWER]]
; CHECK: %x1 = COPY [[IN]]
; CHECK: BL @multiple_args_callee, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %w0, implicit %x1
Expand Down
58 changes: 29 additions & 29 deletions llvm/test/CodeGen/AArch64/GlobalISel/legalize-add.mir
Original file line number Diff line number Diff line change
Expand Up @@ -35,23 +35,23 @@ body: |
; CHECK-LABEL: name: test_scalar_add_big
; CHECK-NOT: G_EXTRACT
; CHECK-NOT: G_SEQUENCE
; CHECK-DAG: [[CARRY0_32:%.*]](32) = G_CONSTANT s32 0
; CHECK-DAG: [[CARRY0:%[0-9]+]](1) = G_TRUNC { s1, s32 } [[CARRY0_32]]
; CHECK: [[RES_LO:%.*]](64), [[CARRY:%.*]](1) = G_UADDE s64 %0, %2, [[CARRY0]]
; CHECK: [[RES_HI:%.*]](64), {{%.*}}(1) = G_UADDE s64 %1, %3, [[CARRY]]
; CHECK-DAG: [[CARRY0_32:%.*]](s32) = G_CONSTANT 0
; CHECK-DAG: [[CARRY0:%[0-9]+]](s1) = G_TRUNC [[CARRY0_32]]
; CHECK: [[RES_LO:%.*]](s64), [[CARRY:%.*]](s1) = G_UADDE %0, %2, [[CARRY0]]
; CHECK: [[RES_HI:%.*]](s64), {{%.*}}(s1) = G_UADDE %1, %3, [[CARRY]]
; CHECK-NOT: G_EXTRACT
; CHECK-NOT: G_SEQUENCE
; CHECK: %x0 = COPY [[RES_LO]]
; CHECK: %x1 = COPY [[RES_HI]]
%0(64) = G_TYPE s64 %x0
%1(64) = G_TYPE s64 %x1
%2(64) = G_TYPE s64 %x2
%3(64) = G_TYPE s64 %x3
%4(128) = G_SEQUENCE { s128, s64, s64 } %0, 0, %1, 64
%5(128) = G_SEQUENCE { s128, s64, s64 } %2, 0, %3, 64
%6(128) = G_ADD s128 %4, %5
%7(64), %8(64) = G_EXTRACT { s64, s64, s128 } %6, 0, 64
%0(s64) = COPY %x0
%1(s64) = COPY %x1
%2(s64) = COPY %x2
%3(s64) = COPY %x3
%4(s128) = G_SEQUENCE %0, 0, %1, 64
%5(s128) = G_SEQUENCE %2, 0, %3, 64
%6(s128) = G_ADD %4, %5
%7(s64), %8(s64) = G_EXTRACT %6, 0, 64
%x0 = COPY %7
%x1 = COPY %8
...
Expand All @@ -69,14 +69,14 @@ body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_scalar_add_small
; CHECK: [[RES:%.*]](8) = G_ADD s8 %2, %3
; CHECK: [[RES:%.*]](s8) = G_ADD %2, %3
%0(64) = G_TYPE s64 %x0
%1(64) = G_TYPE s64 %x1
%2(8) = G_TRUNC { s8, s64 } %0
%3(8) = G_TRUNC { s8, s64 } %1
%4(8) = G_ADD s8 %2, %3
%5(64) = G_ANYEXT { s64, s8 } %4
%0(s64) = COPY %x0
%1(s64) = COPY %x1
%2(s8) = G_TRUNC %0
%3(s8) = G_TRUNC %1
%4(s8) = G_ADD %2, %3
%5(s64) = G_ANYEXT %4
%x0 = COPY %5
...

Expand All @@ -98,21 +98,21 @@ body: |
; CHECK-LABEL: name: test_vector_add
; CHECK-NOT: G_EXTRACT
; CHECK-NOT: G_SEQUENCE
; CHECK: [[RES_LO:%.*]](128) = G_ADD <2 x s64> %0, %2
; CHECK: [[RES_HI:%.*]](128) = G_ADD <2 x s64> %1, %3
; CHECK: [[RES_LO:%.*]](<2 x s64>) = G_ADD %0, %2
; CHECK: [[RES_HI:%.*]](<2 x s64>) = G_ADD %1, %3
; CHECK-NOT: G_EXTRACT
; CHECK-NOT: G_SEQUENCE
; CHECK: %q0 = COPY [[RES_LO]]
; CHECK: %q1 = COPY [[RES_HI]]
%0(128) = G_TYPE s128 %q0
%1(128) = G_TYPE s128 %q1
%2(128) = G_TYPE s128 %q2
%3(128) = G_TYPE s128 %q3
%4(256) = G_SEQUENCE { s256, s128, s128 } %0, 0, %1, 128
%5(256) = G_SEQUENCE { s256, s128, s128 } %2, 0, %3, 128
%6(256) = G_ADD <4 x s64> %4, %5
%7(128), %8(128) = G_EXTRACT { s128, s128, s256 } %6, 0, 128
%0(<2 x s64>) = COPY %q0
%1(<2 x s64>) = COPY %q1
%2(<2 x s64>) = COPY %q2
%3(<2 x s64>) = COPY %q3
%4(<4 x s64>) = G_SEQUENCE %0, 0, %1, 128
%5(<4 x s64>) = G_SEQUENCE %2, 0, %3, 128
%6(<4 x s64>) = G_ADD %4, %5
%7(<2 x s64>), %8(<2 x s64>) = G_EXTRACT %6, 0, 128
%q0 = COPY %7
%q1 = COPY %8
...
14 changes: 7 additions & 7 deletions llvm/test/CodeGen/AArch64/GlobalISel/legalize-and.mir
Original file line number Diff line number Diff line change
Expand Up @@ -22,13 +22,13 @@ body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_scalar_and_small
; CHECK: %4(8) = G_AND s8 %2, %3
; CHECK: %4(s8) = G_AND %2, %3
%0(64) = G_TYPE s64 %x0
%1(64) = G_TYPE s64 %x1
%2(8) = G_TRUNC { s8, s64 } %0
%3(8) = G_TRUNC { s8, s64 } %1
%4(8) = G_AND s8 %2, %3
%5(64) = G_ANYEXT { s64, s8 } %2
%0(s64) = G_TYPE %x0
%1(s64) = G_TYPE %x1
%2(s8) = G_TRUNC %0
%3(s8) = G_TRUNC %1
%4(s8) = G_AND %2, %3
%5(s64) = G_ANYEXT %2
%x0 = COPY %5
...
20 changes: 10 additions & 10 deletions llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir
Original file line number Diff line number Diff line change
Expand Up @@ -24,17 +24,17 @@ registers:
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
%0(64) = G_TYPE s64 %x0
%1(64) = G_TYPE s64 %x0
%0(s64) = COPY %x0
%1(s64) = COPY %x0
%2(8) = G_TRUNC { s8, s64 } %0
%3(8) = G_TRUNC { s8, s64 } %1
%2(s8) = G_TRUNC %0
%3(s8) = G_TRUNC %1
; CHECK: %4(1) = G_ICMP { s1, s64 } intpred(sge), %0, %1
%4(1) = G_ICMP { s1, s64 } intpred(sge), %0, %1
; CHECK: %4(s1) = G_ICMP intpred(sge), %0, %1
%4(s1) = G_ICMP intpred(sge), %0, %1
; CHECK: [[LHS32:%[0-9]+]](32) = G_ZEXT { s32, s8 } %2
; CHECK: [[RHS32:%[0-9]+]](32) = G_ZEXT { s32, s8 } %3
; CHECK: %8(1) = G_ICMP { s1, s32 } intpred(ult), [[LHS32]], [[RHS32]]
%8(1) = G_ICMP { s1, s8 } intpred(ult), %2, %3
; CHECK: [[LHS32:%[0-9]+]](s32) = G_ZEXT %2
; CHECK: [[RHS32:%[0-9]+]](s32) = G_ZEXT %3
; CHECK: %8(s1) = G_ICMP intpred(ult), [[LHS32]], [[RHS32]]
%8(s1) = G_ICMP intpred(ult), %2, %3
...
70 changes: 35 additions & 35 deletions llvm/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir
Original file line number Diff line number Diff line change
Expand Up @@ -41,52 +41,52 @@ body: |
bb.0.entry:
liveins: %w0, %w1, %x2, %x3
%0(32) = G_TYPE s32 %w0
%1(32) = G_TYPE s32 %w1
%2(8) = G_TRUNC { s8, s32 } %0
%0(s32) = COPY %w0
%1(s32) = COPY %w1
%2(s8) = G_TRUNC %0
; Only one of these extracts can be eliminated, the offsets don't match
; properly in the other cases.
; CHECK-LABEL: name: test_combines
; CHECK: %3(32) = G_SEQUENCE { s32, s8 } %2, 1
; CHECK: %4(8) = G_EXTRACT { s8, s32 } %3, 0
; CHECK: %3(s32) = G_SEQUENCE %2, 1
; CHECK: %4(s8) = G_EXTRACT %3, 0
; CHECK-NOT: G_EXTRACT
; CHECK: %6(8) = G_EXTRACT { s8, s32 } %3, 2
; CHECK: %7(32) = G_ZEXT { s32, s8 } %2
%3(32) = G_SEQUENCE { s32, s8 } %2, 1
%4(8) = G_EXTRACT { s8, s32 } %3, 0
%5(8) = G_EXTRACT { s8, s32 } %3, 1
%6(8) = G_EXTRACT { s8, s32 } %3, 2
%7(32) = G_ZEXT { s32, s8 } %5
; CHECK: %6(s8) = G_EXTRACT %3, 2
; CHECK: %7(s32) = G_ZEXT %2
%3(s32) = G_SEQUENCE %2, 1
%4(s8) = G_EXTRACT %3, 0
%5(s8) = G_EXTRACT %3, 1
%6(s8) = G_EXTRACT %3, 2
%7(s32) = G_ZEXT %5
; Similarly, here the types don't match.
; CHECK: %10(32) = G_SEQUENCE { s32, s16, s16 } %8, 0, %9, 16
; CHECK: %11(1) = G_EXTRACT { s1, s32 } %10, 0
; CHECK: %12(32) = G_EXTRACT { s32, s32 } %10, 0
%8(16) = G_TRUNC { s16, s32 } %0
%9(16) = G_ADD s16 %8, %8
%10(32) = G_SEQUENCE { s32, s16, s16 } %8, 0, %9, 16
%11(1) = G_EXTRACT { s1, s32 } %10, 0
%12(32) = G_EXTRACT { s32, s32 } %10, 0
; CHECK: %10(s32) = G_SEQUENCE %8, 0, %9, 16
; CHECK: %11(s1) = G_EXTRACT %10, 0
; CHECK: %12(s32) = G_EXTRACT %10, 0
%8(s16) = G_TRUNC %0
%9(s16) = G_ADD %8, %8
%10(s32) = G_SEQUENCE %8, 0, %9, 16
%11(s1) = G_EXTRACT %10, 0
%12(s32) = G_EXTRACT %10, 0
; CHECK-NOT: G_EXTRACT
; CHECK: %15(16) = G_ADD s16 %8, %9
%13(16), %14(16) = G_EXTRACT { s16, s16, s32 } %10, 0, 16
%15(16) = G_ADD s16 %13, %14
; CHECK: %15(s16) = G_ADD %8, %9
%13(s16), %14(s16) = G_EXTRACT %10, 0, 16
%15(s16) = G_ADD %13, %14
; CHECK: %18(64) = G_EXTRACT { <2 x s32>, s128 } %17, 0
; CHECK: %19(64) = G_ADD <2 x s32> %18, %18
%16(64) = G_TYPE s64 %x0
%17(128) = G_SEQUENCE { s128, s64, s64 } %16, 0, %16, 64
%18(64) = G_EXTRACT { <2 x s32>, s128 } %17, 0
%19(64) = G_ADD <2 x s32> %18, %18
; CHECK: %18(<2 x s32>) = G_EXTRACT %17, 0
; CHECK: %19(<2 x s32>) = G_ADD %18, %18
%16(s64) = COPY %x0
%17(s128) = G_SEQUENCE %16, 0, %16, 64
%18(<2 x s32>) = G_EXTRACT %17, 0
%19(<2 x s32>) = G_ADD %18, %18
; CHECK-NOT: G_SEQUENCE
; CHECK-NOT: G_EXTRACT
; CHECK: %24(32) = G_ADD s32 %0, %20
%20(32) = G_ADD s32 %0, %0
%21(64) = G_SEQUENCE { s64, s32, s32 } %0, 0, %20, 32
%22(32) = G_EXTRACT { s32, s64 } %21, 0
%23(32) = G_EXTRACT { s32, s64 } %21, 32
%24(32) = G_ADD s32 %22, %23
; CHECK: %24(s32) = G_ADD %0, %20
%20(s32) = G_ADD %0, %0
%21(s64) = G_SEQUENCE %0, 0, %20, 32
%22(s32) = G_EXTRACT %21, 0
%23(s32) = G_EXTRACT %21, 32
%24(s32) = G_ADD %22, %23
...
44 changes: 22 additions & 22 deletions llvm/test/CodeGen/AArch64/GlobalISel/legalize-constant.mir
Original file line number Diff line number Diff line change
Expand Up @@ -25,22 +25,22 @@ registers:
body: |
bb.0.entry:
; CHECK-LABEL: name: test_constant
; CHECK: [[TMP:%[0-9]+]](32) = G_CONSTANT s32 0
; CHECK: %0(1) = G_TRUNC { s1, s32 } [[TMP]]
; CHECK: [[TMP:%[0-9]+]](32) = G_CONSTANT s32 42
; CHECK: %1(8) = G_TRUNC { s8, s32 } [[TMP]]
; CHECK: [[TMP:%[0-9]+]](32) = G_CONSTANT s32 65535
; CHECK: %2(16) = G_TRUNC { s16, s32 } [[TMP]]
; CHECK: %3(32) = G_CONSTANT s32 -1
; CHECK: %4(64) = G_CONSTANT s64 1
; CHECK: %5(64) = G_CONSTANT p0 0
; CHECK: [[TMP:%[0-9]+]](s32) = G_CONSTANT 0
; CHECK: %0(s1) = G_TRUNC [[TMP]]
; CHECK: [[TMP:%[0-9]+]](s32) = G_CONSTANT 42
; CHECK: %1(s8) = G_TRUNC [[TMP]]
; CHECK: [[TMP:%[0-9]+]](s32) = G_CONSTANT 65535
; CHECK: %2(s16) = G_TRUNC [[TMP]]
; CHECK: %3(s32) = G_CONSTANT -1
; CHECK: %4(s64) = G_CONSTANT 1
; CHECK: %5(s64) = G_CONSTANT 0
%0(1) = G_CONSTANT s1 0
%1(8) = G_CONSTANT s8 42
%2(16) = G_CONSTANT s16 65535
%3(32) = G_CONSTANT s32 -1
%4(64) = G_CONSTANT s64 1
%5(64) = G_CONSTANT p0 0
%0(s1) = G_CONSTANT 0
%1(s8) = G_CONSTANT 42
%2(s16) = G_CONSTANT 65535
%3(s32) = G_CONSTANT -1
%4(s64) = G_CONSTANT 1
%5(s64) = G_CONSTANT 0
...

---
Expand All @@ -52,12 +52,12 @@ registers:
body: |
bb.0.entry:
; CHECK-LABEL: name: test_fconstant
; CHECK: %0(32) = G_FCONSTANT s32 float 1.000000e+00
; CHECK: %1(64) = G_FCONSTANT s64 double 2.000000e+00
; CHECK: [[TMP:%[0-9]+]](32) = G_FCONSTANT s32 half 0xH0000
; CHECK; %2(16) = G_FPTRUNC { s16, s32 } [[TMP]]
; CHECK: %0(s32) = G_FCONSTANT float 1.000000e+00
; CHECK: %1(s64) = G_FCONSTANT double 2.000000e+00
; CHECK: [[TMP:%[0-9]+]](s32) = G_FCONSTANT half 0xH0000
; CHECK; %2(s16) = G_FPTRUNC [[TMP]]
%0(32) = G_FCONSTANT s32 float 1.0
%1(64) = G_FCONSTANT s64 double 2.0
%2(16) = G_FCONSTANT s16 half 0.0
%0(s32) = G_FCONSTANT float 1.0
%1(s64) = G_FCONSTANT double 2.0
%2(s16) = G_FCONSTANT half 0.0
...
28 changes: 14 additions & 14 deletions llvm/test/CodeGen/AArch64/GlobalISel/legalize-div.mir
Original file line number Diff line number Diff line change
Expand Up @@ -21,22 +21,22 @@ registers:
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
%0(64) = G_TYPE s64 %x0
%1(64) = G_TYPE s64 %x1
%2(32) = G_TRUNC { s8, s64 } %0
%3(32) = G_TRUNC { s8, s64 } %1
%0(s64) = COPY %x0
%1(s64) = COPY %x1
%2(s8) = G_TRUNC %0
%3(s8) = G_TRUNC %1
; CHECK: [[LHS32:%[0-9]+]](32) = G_SEXT { s32, s8 } %2
; CHECK: [[RHS32:%[0-9]+]](32) = G_SEXT { s32, s8 } %3
; CHECK: [[QUOT32:%[0-9]+]](32) = G_SDIV s32 [[LHS32]], [[RHS32]]
; CHECK: [[RES:%[0-9]+]](8) = G_TRUNC { s8, s32 } [[QUOT32]]
%4(8) = G_SDIV s8 %2, %3
; CHECK: [[LHS32:%[0-9]+]](s32) = G_SEXT %2
; CHECK: [[RHS32:%[0-9]+]](s32) = G_SEXT %3
; CHECK: [[QUOT32:%[0-9]+]](s32) = G_SDIV [[LHS32]], [[RHS32]]
; CHECK: [[RES:%[0-9]+]](s8) = G_TRUNC [[QUOT32]]
%4(s8) = G_SDIV %2, %3
; CHECK: [[LHS32:%[0-9]+]](32) = G_ZEXT { s32, s8 } %2
; CHECK: [[RHS32:%[0-9]+]](32) = G_ZEXT { s32, s8 } %3
; CHECK: [[QUOT32:%[0-9]+]](32) = G_UDIV s32 [[LHS32]], [[RHS32]]
; CHECK: [[RES:%[0-9]+]](8) = G_TRUNC { s8, s32 } [[QUOT32]]
%5(8) = G_UDIV s8 %2, %3
; CHECK: [[LHS32:%[0-9]+]](s32) = G_ZEXT %2
; CHECK: [[RHS32:%[0-9]+]](s32) = G_ZEXT %3
; CHECK: [[QUOT32:%[0-9]+]](s32) = G_UDIV [[LHS32]], [[RHS32]]
; CHECK: [[RES:%[0-9]+]](s8) = G_TRUNC [[QUOT32]]
%5(s8) = G_UDIV %2, %3
...
72 changes: 36 additions & 36 deletions llvm/test/CodeGen/AArch64/GlobalISel/legalize-ext.mir
Original file line number Diff line number Diff line change
Expand Up @@ -34,46 +34,46 @@ registers:
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
%0(64) = G_TYPE s64 %x0
%0(s64) = G_TYPE %x0
; CHECK: %1(1) = G_TRUNC { s1, s64 } %0
; CHECK: %2(8) = G_TRUNC { s8, s64 } %0
; CHECK: %3(16) = G_TRUNC { s16, s64 } %0
; CHECK: %4(32) = G_TRUNC { s16, s64 } %0
%1(1) = G_TRUNC { s1, s64 } %0
%2(8) = G_TRUNC { s8, s64 } %0
%3(16) = G_TRUNC { s16, s64 } %0
%4(32) = G_TRUNC { s16, s64 } %0
; CHECK: %1(s1) = G_TRUNC %0
; CHECK: %2(s8) = G_TRUNC %0
; CHECK: %3(s16) = G_TRUNC %0
; CHECK: %4(s32) = G_TRUNC %0
%1(s1) = G_TRUNC %0
%2(s8) = G_TRUNC %0
%3(s16) = G_TRUNC %0
%4(s32) = G_TRUNC %0
; CHECK: %5(64) = G_ANYEXT { s64, s1 } %1
; CHECK: %6(64) = G_ZEXT { s64, s8 } %2
; CHECK: %7(64) = G_ANYEXT { s64, s16 } %3
; CHECK: %8(64) = G_SEXT { s64, s32 } %4
%5(64) = G_ANYEXT { s64, s1 } %1
%6(64) = G_ZEXT { s64, s8 } %2
%7(64) = G_ANYEXT { s64, s16 } %3
%8(64) = G_SEXT { s64, s32 } %4
; CHECK: %5(s64) = G_ANYEXT %1
; CHECK: %6(s64) = G_ZEXT %2
; CHECK: %7(s64) = G_ANYEXT %3
; CHECK: %8(s64) = G_SEXT %4
%5(s64) = G_ANYEXT %1
%6(s64) = G_ZEXT %2
%7(s64) = G_ANYEXT %3
%8(s64) = G_SEXT %4
; CHECK: %9(32) = G_SEXT { s32, s1 } %1
; CHECK: %10(32) = G_ZEXT { s32, s8 } %2
; CHECK: %11(32) = G_ANYEXT { s32, s16 } %3
%9(32) = G_SEXT { s32, s1 } %1
%10(32) = G_ZEXT { s32, s8 } %2
%11(32) = G_ANYEXT { s32, s16 } %3
; CHECK: %9(s32) = G_SEXT %1
; CHECK: %10(s32) = G_ZEXT %2
; CHECK: %11(s32) = G_ANYEXT %3
%9(s32) = G_SEXT %1
%10(s32) = G_ZEXT %2
%11(s32) = G_ANYEXT %3
; CHECK: %12(32) = G_ZEXT { s32, s1 } %1
; CHECK: %13(32) = G_ANYEXT { s32, s8 } %2
; CHECK: %14(32) = G_SEXT { s32, s16 } %3
%12(32) = G_ZEXT { s32, s1 } %1
%13(32) = G_ANYEXT { s32, s8 } %2
%14(32) = G_SEXT { s32, s16 } %3
; CHECK: %12(s32) = G_ZEXT %1
; CHECK: %13(s32) = G_ANYEXT %2
; CHECK: %14(s32) = G_SEXT %3
%12(s32) = G_ZEXT %1
%13(s32) = G_ANYEXT %2
%14(s32) = G_SEXT %3
; CHECK: %15(8) = G_ZEXT { s8, s1 } %1
; CHECK: %16(16) = G_ANYEXT { s16, s8 } %2
%15(8) = G_ZEXT { s8, s1 } %1
%16(16) = G_ANYEXT { s16, s8 } %2
; CHECK: %15(s8) = G_ZEXT %1
; CHECK: %16(s16) = G_ANYEXT %2
%15(s8) = G_ZEXT %1
%16(s16) = G_ANYEXT %2
; CHECK: %18(64) = G_FPEXT { s64, s32 } %17
%17(32) = G_TRUNC { s32, s64 } %0
%18(64) = G_FPEXT { s64, s32 } %17
; CHECK: %18(s64) = G_FPEXT %17
%17(s32) = G_TRUNC %0
%18(s64) = G_FPEXT %17
...
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/AArch64/GlobalISel/legalize-fcmp.mir
Original file line number Diff line number Diff line change
Expand Up @@ -21,15 +21,15 @@ registers:
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
%0(64) = G_TYPE s64 %x0
%1(64) = G_TYPE s64 %x0
%0(s64) = G_TYPE %x0
%1(s64) = G_TYPE %x0
%2(8) = G_TRUNC { s32, s64 } %0
%3(8) = G_TRUNC { s32, s64 } %1
%2(s32) = G_TRUNC %0
%3(s32) = G_TRUNC %1
; CHECK: %4(1) = G_FCMP { s1, s64 } floatpred(oge), %0, %1
%4(1) = G_FCMP { s1, s64 } floatpred(oge), %0, %1
; CHECK: %4(s1) = G_FCMP floatpred(oge), %0, %1
%4(s1) = G_FCMP floatpred(oge), %0, %1
; CHECK: %5(1) = G_FCMP { s1, s32 } floatpred(uno), %2, %3
%5(1) = G_FCMP { s1, s32 } floatpred(uno), %2, %3
; CHECK: %5(s1) = G_FCMP floatpred(uno), %2, %3
%5(s1) = G_FCMP floatpred(uno), %2, %3
...
Original file line number Diff line number Diff line change
Expand Up @@ -15,10 +15,10 @@ body: |
bb.0:
liveins: %x0
; CHECK-LABEL: name: test_copy
; CHECK: %0(64) = G_TYPE s64 %x0
; CHECK: %0(s64) = G_TYPE %x0
; CHECK-NEXT: %x0 = COPY %0
%0(64) = G_TYPE s64 %x0
%0(s64) = G_TYPE %x0
%x0 = COPY %0
...

Expand Down
58 changes: 30 additions & 28 deletions llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir
Original file line number Diff line number Diff line change
Expand Up @@ -26,23 +26,23 @@ body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_load
%0(64) = G_TYPE s64 %x0
%0(p0) = COPY %x0
; CHECK: [[BIT8:%[0-9]+]](8) = G_LOAD { s8, p0 } %0 :: (load 1 from %ir.addr)
; CHECK: %1(1) = G_TRUNC { s1, s8 } [[BIT8]]
%1(1) = G_LOAD { s1, p0 } %0 :: (load 1 from %ir.addr)
; CHECK: [[BIT8:%[0-9]+]](s8) = G_LOAD %0 :: (load 1 from %ir.addr)
; CHECK: %1(s1) = G_TRUNC [[BIT8]]
%1(s1) = G_LOAD %0 :: (load 1 from %ir.addr)
; CHECK: %2(8) = G_LOAD { s8, p0 } %0 :: (load 1 from %ir.addr)
%2(8) = G_LOAD { s8, p0 } %0 :: (load 1 from %ir.addr)
; CHECK: %2(s8) = G_LOAD %0 :: (load 1 from %ir.addr)
%2(s8) = G_LOAD %0 :: (load 1 from %ir.addr)
; CHECK: %3(16) = G_LOAD { s16, p0 } %0 :: (load 2 from %ir.addr)
%3(16) = G_LOAD { s16, p0 } %0 :: (load 2 from %ir.addr)
; CHECK: %3(s16) = G_LOAD %0 :: (load 2 from %ir.addr)
%3(s16) = G_LOAD %0 :: (load 2 from %ir.addr)
; CHECK: %4(32) = G_LOAD { s32, p0 } %0 :: (load 4 from %ir.addr)
%4(32) = G_LOAD { s32, p0 } %0 :: (load 4 from %ir.addr)
; CHECK: %4(s32) = G_LOAD %0 :: (load 4 from %ir.addr)
%4(s32) = G_LOAD %0 :: (load 4 from %ir.addr)
; CHECK: %5(64) = G_LOAD { s64, p0 } %0 :: (load 8 from %ir.addr)
%5(64) = G_LOAD { s64, p0 } %0 :: (load 8 from %ir.addr)
; CHECK: %5(s64) = G_LOAD %0 :: (load 8 from %ir.addr)
%5(s64) = G_LOAD %0 :: (load 8 from %ir.addr)
...

---
Expand All @@ -53,30 +53,32 @@ registers:
- { id: 2, class: _ }
- { id: 3, class: _ }
- { id: 4, class: _ }
- { id: 5, class: _ }
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_store
%0(64) = G_TYPE s64 %x0
%1(32) = G_TYPE s32 %w1
%0(p0) = COPY %x0
%1(s32) = COPY %w1
; CHECK: [[BIT8:%[0-9]+]](8) = G_ANYEXT { s8, s1 } %2
; CHECK: G_STORE { s8, p0 } [[BIT8]], %0 :: (store 1 into %ir.addr)
%2(1) = G_TRUNC s1 %1
G_STORE { s1, p0 } %2, %0 :: (store 1 into %ir.addr)
; CHECK: [[BIT8:%[0-9]+]](s8) = G_ANYEXT %2
; CHECK: G_STORE [[BIT8]], %0 :: (store 1 into %ir.addr)
%2(s1) = G_TRUNC %1
G_STORE %2, %0 :: (store 1 into %ir.addr)
; CHECK: G_STORE { s8, p0 } %3, %0 :: (store 1 into %ir.addr)
%3(8) = G_TRUNC s8 %1
G_STORE { s8, p0 } %3, %0 :: (store 1 into %ir.addr)
; CHECK: G_STORE %3, %0 :: (store 1 into %ir.addr)
%3(s8) = G_TRUNC %1
G_STORE %3, %0 :: (store 1 into %ir.addr)
; CHECK: G_STORE { s16, p0 } %4, %0 :: (store 2 into %ir.addr)
%4(16) = G_TRUNC s16 %1
G_STORE { s16, p0 } %4, %0 :: (store 2 into %ir.addr)
; CHECK: G_STORE %4, %0 :: (store 2 into %ir.addr)
%4(s16) = G_TRUNC %1
G_STORE %4, %0 :: (store 2 into %ir.addr)
; CHECK: G_STORE { s32, p0 } %1, %0 :: (store 4 into %ir.addr)
G_STORE { s32, p0 } %1, %0 :: (store 4 into %ir.addr)
; CHECK: G_STORE %1, %0 :: (store 4 into %ir.addr)
G_STORE %1, %0 :: (store 4 into %ir.addr)
; CHECK: G_STORE { s64, p0 } %0, %0 :: (store 8 into %ir.addr)
G_STORE { s64, p0 } %0, %0 :: (store 8 into %ir.addr)
; CHECK: G_STORE %5, %0 :: (store 8 into %ir.addr)
%5(s64) = G_PTRTOINT %0
G_STORE %5, %0 :: (store 8 into %ir.addr)
...
14 changes: 7 additions & 7 deletions llvm/test/CodeGen/AArch64/GlobalISel/legalize-mul.mir
Original file line number Diff line number Diff line change
Expand Up @@ -22,13 +22,13 @@ body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_scalar_mul_small
; CHECK: %4(8) = G_MUL s8 %2, %3
; CHECK: %4(s8) = G_MUL %2, %3
%0(64) = G_TYPE s64 %x0
%1(64) = G_TYPE s64 %x1
%2(8) = G_TRUNC { s8, s64 } %0
%3(8) = G_TRUNC { s8, s64 } %1
%4(8) = G_MUL s8 %2, %3
%5(64) = G_ANYEXT { s64, s8 } %2
%0(s64) = G_TYPE %x0
%1(s64) = G_TYPE %x1
%2(s8) = G_TRUNC %0
%3(s8) = G_TRUNC %1
%4(s8) = G_MUL %2, %3
%5(s64) = G_ANYEXT %2
%x0 = COPY %5
...
14 changes: 7 additions & 7 deletions llvm/test/CodeGen/AArch64/GlobalISel/legalize-or.mir
Original file line number Diff line number Diff line change
Expand Up @@ -22,13 +22,13 @@ body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_scalar_or_small
; CHECK: %4(8) = G_OR s8 %2, %3
; CHECK: %4(s8) = G_OR %2, %3
%0(64) = G_TYPE s64 %x0
%1(64) = G_TYPE s64 %x1
%2(8) = G_TRUNC { s8, s64 } %0
%3(8) = G_TRUNC { s8, s64 } %1
%4(8) = G_OR s8 %2, %3
%5(64) = G_ANYEXT { s64, s8 } %2
%0(s64) = G_TYPE %x0
%1(s64) = G_TYPE %x1
%2(s8) = G_TRUNC %0
%3(s8) = G_TRUNC %1
%4(s8) = G_OR %2, %3
%5(s64) = G_ANYEXT %2
%x0 = COPY %5
...
50 changes: 25 additions & 25 deletions llvm/test/CodeGen/AArch64/GlobalISel/legalize-rem.mir
Original file line number Diff line number Diff line change
Expand Up @@ -27,40 +27,40 @@ body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK: [[QUOT:%[0-9]+]](64) = G_UDIV s64 %0, %1
; CHECK: [[PROD:%[0-9]+]](64) = G_MUL s64 [[QUOT]], %1
; CHECK: [[RES:%[0-9]+]](64) = G_SUB s64 %0, [[PROD]]
%0(64) = G_TYPE s64 %x0
%1(64) = G_TYPE s64 %x1
%2(64) = G_UREM s64 %0, %1
; CHECK: [[QUOT:%[0-9]+]](s64) = G_UDIV %0, %1
; CHECK: [[PROD:%[0-9]+]](s64) = G_MUL [[QUOT]], %1
; CHECK: [[RES:%[0-9]+]](s64) = G_SUB %0, [[PROD]]
%0(s64) = COPY %x0
%1(s64) = COPY %x1
%2(s64) = G_UREM %0, %1
; CHECK: [[QUOT:%[0-9]+]](32) = G_SDIV s32 %3, %4
; CHECK: [[PROD:%[0-9]+]](32) = G_MUL s32 [[QUOT]], %4
; CHECK: [[RES:%[0-9]+]](32) = G_SUB s32 %3, [[PROD]]
%3(32) = G_TRUNC { s32, s64 } %0
%4(32) = G_TRUNC { s32, s64 } %1
%5(32) = G_SREM s32 %3, %4
; CHECK: [[QUOT:%[0-9]+]](s32) = G_SDIV %3, %4
; CHECK: [[PROD:%[0-9]+]](s32) = G_MUL [[QUOT]], %4
; CHECK: [[RES:%[0-9]+]](s32) = G_SUB %3, [[PROD]]
%3(s32) = G_TRUNC %0
%4(s32) = G_TRUNC %1
%5(s32) = G_SREM %3, %4
; CHECK: [[LHS32:%[0-9]+]](32) = G_SEXT { s32, s8 } %6
; CHECK: [[RHS32:%[0-9]+]](32) = G_SEXT { s32, s8 } %7
; CHECK: [[QUOT32:%[0-9]+]](32) = G_SDIV s32 [[LHS32]], [[RHS32]]
; CHECK: [[QUOT:%[0-9]+]](8) = G_TRUNC { s8, s32 } [[QUOT32]]
; CHECK: [[PROD:%[0-9]+]](8) = G_MUL s8 [[QUOT]], %7
; CHECK: [[RES:%[0-9]+]](8) = G_SUB s8 %6, [[PROD]]
%6(8) = G_TRUNC { s8, s64 } %0
%7(8) = G_TRUNC { s8, s64 } %1
%8(8) = G_SREM s8 %6, %7
; CHECK: [[LHS32:%[0-9]+]](s32) = G_SEXT %6
; CHECK: [[RHS32:%[0-9]+]](s32) = G_SEXT %7
; CHECK: [[QUOT32:%[0-9]+]](s32) = G_SDIV [[LHS32]], [[RHS32]]
; CHECK: [[QUOT:%[0-9]+]](s8) = G_TRUNC [[QUOT32]]
; CHECK: [[PROD:%[0-9]+]](s8) = G_MUL [[QUOT]], %7
; CHECK: [[RES:%[0-9]+]](s8) = G_SUB %6, [[PROD]]
%6(s8) = G_TRUNC %0
%7(s8) = G_TRUNC %1
%8(s8) = G_SREM %6, %7
; CHECK: %d0 = COPY %0
; CHECK: %d1 = COPY %1
; CHECK: BL $fmod, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %d0, implicit %d1, implicit-def %d0
; CHECK: %9(64) = G_TYPE s64 %d0
%9(64) = G_FREM s64 %0, %1
; CHECK: %9(s64) = COPY %d0
%9(s64) = G_FREM %0, %1
; CHECK: %s0 = COPY %3
; CHECK: %s1 = COPY %4
; CHECK: BL $fmodf, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %s0, implicit %s1, implicit-def %s0
; CHECK: %10(32) = G_TYPE s32 %s0
%10(32) = G_FREM s32 %3, %4
; CHECK: %10(s32) = COPY %s0
%10(s32) = G_FREM %3, %4
...
Loading