[GlobalISel] Use more MachineIRBuilder helper methods
Reviewers: arsenm, nhaehnle

Subscribers: wdng, rovka, hiraditya, volkan, Petar.Avramovic, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D72833
Committed by jayfoad on Jan 16, 2020 (commit 28bb43b, parent 15a6df5)
Showing 3 changed files with 70 additions and 102 deletions.
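
Throughout the diff below, generic buildInstr calls (an explicit TargetOpcode plus addDef/addUse chains or operand lists) are replaced with the dedicated typed helpers on MachineIRBuilder. As a rough, hypothetical sketch of the pattern (not part of this commit; the function and variable names are placeholders), both forms below emit the same G_FNEG instruction; the helper simply fixes the opcode and checks the operands at the call site:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/TargetOpcodes.h"

using namespace llvm;

// Illustrative only: MIRBuilder, Res, Op and Flags stand in for whatever the
// surrounding translation code already has in scope.
static void buildFNegBothWays(MachineIRBuilder &MIRBuilder, Register Res,
                              Register Op, Optional<unsigned> Flags) {
  // Generic form: opcode passed explicitly, defs and uses as operand lists.
  MIRBuilder.buildInstr(TargetOpcode::G_FNEG, {Res}, {Op}, Flags);
  // Helper form: same MIR, opcode implied by the method name.
  MIRBuilder.buildFNeg(Res, Op, Flags);
}
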
7 changes: 4 additions & 3 deletions llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -876,7 +876,7 @@ class MachineIRBuilder {
   ///
   /// \return The newly created instruction.
   MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op,
-                                   Optional<unsigned> FLags = None);
+                                   Optional<unsigned> Flags = None);
 
   /// Build and insert \p Res = G_TRUNC \p Op
   ///
@@ -1377,8 +1377,9 @@ class MachineIRBuilder {
 
   /// Build and insert \p Res = G_FSUB \p Op0, \p Op1
   MachineInstrBuilder buildFSub(const DstOp &Dst, const SrcOp &Src0,
-                                const SrcOp &Src1) {
-    return buildInstr(TargetOpcode::G_FSUB, {Dst}, {Src0, Src1});
+                                const SrcOp &Src1,
+                                Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_FSUB, {Dst}, {Src0, Src1}, Flags);
   }
 
   /// Build and insert \p Res = G_FMA \p Op0, \p Op1, \p Op2
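
The buildFSub change adds an optional fast-math-flags parameter, so callers can forward flags through the typed helper instead of dropping to the generic buildInstr. A hypothetical call site (B, Dst, X and Y are placeholders, not from this commit) might look like:

  // Forward an nsz fast-math flag through the typed helper.
  B.buildFSub(Dst, X, Y, MachineInstr::FmNsz);
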
67 changes: 30 additions & 37 deletions llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -316,7 +316,7 @@ bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
       Flags = MachineInstr::copyFlagsFromInstruction(I);
     }
     // Negate the last operand of the FSUB
-    MIRBuilder.buildInstr(TargetOpcode::G_FNEG, {Res}, {Op1}, Flags);
+    MIRBuilder.buildFNeg(Res, Op1, Flags);
     return true;
   }
   return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
@@ -330,7 +330,7 @@ bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
     const Instruction &I = cast<Instruction>(U);
     Flags = MachineInstr::copyFlagsFromInstruction(I);
   }
-  MIRBuilder.buildInstr(TargetOpcode::G_FNEG, {Res}, {Op0}, Flags);
+  MIRBuilder.buildFNeg(Res, Op0, Flags);
   return true;
 }
 
@@ -353,8 +353,8 @@ bool IRTranslator::translateCompare(const User &U,
         Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
   else {
     assert(CI && "Instruction should be CmpInst");
-    MIRBuilder.buildInstr(TargetOpcode::G_FCMP, {Res}, {Pred, Op0, Op1},
-                          MachineInstr::copyFlagsFromInstruction(*CI));
+    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1,
+                         MachineInstr::copyFlagsFromInstruction(*CI));
   }
 
   return true;
@@ -631,8 +631,7 @@ void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
   if (CB.TrueBB == CB.ThisBB->getNextNode()) {
     std::swap(CB.TrueBB, CB.FalseBB);
     auto True = MIB.buildConstant(i1Ty, 1);
-    Cond = MIB.buildInstr(TargetOpcode::G_XOR, {i1Ty}, {Cond, True}, None)
-               .getReg(0);
+    Cond = MIB.buildXor(i1Ty, Cond, True).getReg(0);
   }
 
   MIB.buildBrCond(Cond, *CB.TrueBB);
@@ -1016,8 +1015,7 @@ bool IRTranslator::translateSelect(const User &U,
     Flags = MachineInstr::copyFlagsFromInstruction(*Cmp);
 
   for (unsigned i = 0; i < ResRegs.size(); ++i) {
-    MIRBuilder.buildInstr(TargetOpcode::G_SELECT, {ResRegs[i]},
-                          {Tst, Op0Regs[i], Op1Regs[i]}, Flags);
+    MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
   }
 
   return true;
@@ -1171,8 +1169,8 @@ void IRTranslator::getStackGuard(Register DstReg,
                                  MachineIRBuilder &MIRBuilder) {
   const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
   MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
-  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
-  MIB.addDef(DstReg);
+  auto MIB =
+      MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
 
   auto &TLI = *MF->getSubtarget().getTargetLowering();
   Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
@@ -1191,11 +1189,9 @@ void IRTranslator::getStackGuard(Register DstReg,
 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                               MachineIRBuilder &MIRBuilder) {
   ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
-  MIRBuilder.buildInstr(Op)
-      .addDef(ResRegs[0])
-      .addDef(ResRegs[1])
-      .addUse(getOrCreateVReg(*CI.getOperand(0)))
-      .addUse(getOrCreateVReg(*CI.getOperand(1)));
+  MIRBuilder.buildInstr(
+      Op, {ResRegs[0], ResRegs[1]},
+      {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))});
 
   return true;
 }
@@ -1369,8 +1365,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
     unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
 
     // FIXME: Get alignment
-    MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
-        .addUse(getOrCreateVReg(*Ptr))
+    MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
         .addMemOperand(MF->getMachineMemOperand(
             MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 1));
     return true;
@@ -1423,14 +1418,14 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                        TLI.getValueType(*DL, CI.getType()))) {
       // TODO: Revisit this to see if we should move this part of the
       // lowering to the combiner.
-      MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2},
-                            MachineInstr::copyFlagsFromInstruction(CI));
+      MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
+                          MachineInstr::copyFlagsFromInstruction(CI));
     } else {
       LLT Ty = getLLTForType(*CI.getType(), *DL);
-      auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1},
-                                        MachineInstr::copyFlagsFromInstruction(CI));
-      MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2},
-                            MachineInstr::copyFlagsFromInstruction(CI));
+      auto FMul = MIRBuilder.buildFMul(
+          Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
+      MIRBuilder.buildFAdd(Dst, FMul, Op2,
+                           MachineInstr::copyFlagsFromInstruction(CI));
     }
     return true;
   }
@@ -1508,9 +1503,8 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           : TargetOpcode::G_CTTZ_ZERO_UNDEF
                           : Cst->isZero() ? TargetOpcode::G_CTLZ
                                           : TargetOpcode::G_CTLZ_ZERO_UNDEF;
-    MIRBuilder.buildInstr(Opcode)
-        .addDef(getOrCreateVReg(CI))
-        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
+    MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
+                          {getOrCreateVReg(*CI.getArgOperand(0))});
     return true;
   }
   case Intrinsic::invariant_start: {
@@ -1528,9 +1522,9 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
     return true;
   case Intrinsic::read_register: {
     Value *Arg = CI.getArgOperand(0);
-    MIRBuilder.buildInstr(TargetOpcode::G_READ_REGISTER)
-        .addDef(getOrCreateVReg(CI))
-        .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
+    MIRBuilder
+        .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
+        .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
     return true;
   }
   }
@@ -1863,10 +1857,9 @@ bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
   // we're completely discarding the i64/double distinction here (amongst
   // others). Fortunately the ABIs I know of where that matters don't use va_arg
   // anyway but that's not guaranteed.
-  MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
-      .addDef(getOrCreateVReg(U))
-      .addUse(getOrCreateVReg(*U.getOperand(0)))
-      .addImm(DL->getABITypeAlignment(U.getType()));
+  MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
+                        {getOrCreateVReg(*U.getOperand(0)),
+                         uint64_t(DL->getABITypeAlignment(U.getType()))});
   return true;
 }
 
@@ -1936,10 +1929,10 @@ bool IRTranslator::translateShuffleVector(const User &U,
   SmallVector<int, 8> Mask;
   ShuffleVectorInst::getShuffleMask(cast<Constant>(U.getOperand(2)), Mask);
   ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
-  MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
-      .addDef(getOrCreateVReg(U))
-      .addUse(getOrCreateVReg(*U.getOperand(0)))
-      .addUse(getOrCreateVReg(*U.getOperand(1)))
+  MIRBuilder
+      .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
+                  {getOrCreateVReg(*U.getOperand(0)),
+                   getOrCreateVReg(*U.getOperand(1))})
       .addShuffleMask(MaskAlloc);
   return true;
 }