Skip to content

Commit

Permalink
[Target] use getSubtarget<> instead of static_cast<>(getSubtarget())
Browse files Browse the repository at this point in the history
Differential Revision: https://reviews.llvm.org/D125391
  • Loading branch information
Ghost-LZW authored and mshockwave committed May 26, 2022
1 parent bd67468 commit ad73ce3
Show file tree
Hide file tree
Showing 46 changed files with 68 additions and 93 deletions.
3 changes: 3 additions & 0 deletions llvm/include/llvm/CodeGen/SelectionDAG.h
Expand Up @@ -452,6 +452,9 @@ class SelectionDAG {
const DataLayout &getDataLayout() const { return MF->getDataLayout(); }
const TargetMachine &getTarget() const { return TM; }
const TargetSubtargetInfo &getSubtarget() const { return MF->getSubtarget(); }
template <typename STC> const STC &getSubtarget() const {
return MF->getSubtarget<STC>();
}
const TargetLowering &getTargetLoweringInfo() const { return *TLI; }
const TargetLibraryInfo &getLibInfo() const { return *LibInfo; }
const SelectionDAGTargetInfo &getSelectionDAGInfo() const { return *TSI; }
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
Expand Up @@ -132,7 +132,7 @@ class AArch64AsmPrinter : public AsmPrinter {

bool runOnMachineFunction(MachineFunction &MF) override {
AArch64FI = MF.getInfo<AArch64FunctionInfo>();
STI = static_cast<const AArch64Subtarget*>(&MF.getSubtarget());
STI = &MF.getSubtarget<AArch64Subtarget>();

SetupMachineFunction(MF);

Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
Expand Up @@ -813,7 +813,7 @@ void FalkorHWPFFix::runOnLoop(MachineLoop &L, MachineFunction &Fn) {
}

bool FalkorHWPFFix::runOnMachineFunction(MachineFunction &Fn) {
auto &ST = static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
auto &ST = Fn.getSubtarget<AArch64Subtarget>();
if (ST.getProcFamily() != AArch64Subtarget::Falkor)
return false;

Expand Down
3 changes: 1 addition & 2 deletions llvm/lib/Target/AArch64/AArch64FastISel.cpp
Expand Up @@ -283,8 +283,7 @@ class AArch64FastISel final : public FastISel {
explicit AArch64FastISel(FunctionLoweringInfo &FuncInfo,
const TargetLibraryInfo *LibInfo)
: FastISel(FuncInfo, LibInfo, /*SkipTargetIndependentISel=*/true) {
Subtarget =
&static_cast<const AArch64Subtarget &>(FuncInfo.MF->getSubtarget());
Subtarget = &FuncInfo.MF->getSubtarget<AArch64Subtarget>();
Context = &FuncInfo.Fn->getContext();
}

Expand Down
28 changes: 10 additions & 18 deletions llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
Expand Up @@ -2566,8 +2566,7 @@ static SDValue emitStrictFPComparison(SDValue LHS, SDValue RHS, const SDLoc &dl,
EVT VT = LHS.getValueType();
assert(VT != MVT::f128);

const bool FullFP16 =
static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasFullFP16();
const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();

if (VT == MVT::f16 && !FullFP16) {
LHS = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f32, MVT::Other},
Expand All @@ -2585,8 +2584,7 @@ static SDValue emitStrictFPComparison(SDValue LHS, SDValue RHS, const SDLoc &dl,
static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC,
const SDLoc &dl, SelectionDAG &DAG) {
EVT VT = LHS.getValueType();
const bool FullFP16 =
static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasFullFP16();
const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();

if (VT.isFloatingPoint()) {
assert(VT != MVT::f128);
Expand Down Expand Up @@ -2694,8 +2692,7 @@ static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS,
AArch64CC::CondCode OutCC,
const SDLoc &DL, SelectionDAG &DAG) {
unsigned Opcode = 0;
const bool FullFP16 =
static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasFullFP16();
const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();

if (LHS.getValueType().isFloatingPoint()) {
assert(LHS.getValueType() != MVT::f128);
Expand Down Expand Up @@ -11865,8 +11862,7 @@ SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op,
return DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType());
}

const bool FullFP16 =
static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasFullFP16();
const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();

// Make v4f16 (only) fcmp operations utilise vector instructions
// v8f16 support will be a little more complicated
Expand Down Expand Up @@ -12000,7 +11996,7 @@ SDValue AArch64TargetLowering::LowerVECREDUCE(SDValue Op,

SDValue AArch64TargetLowering::LowerATOMIC_LOAD_SUB(SDValue Op,
SelectionDAG &DAG) const {
auto &Subtarget = static_cast<const AArch64Subtarget &>(DAG.getSubtarget());
auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
if (!Subtarget.hasLSE() && !Subtarget.outlineAtomics())
return SDValue();

Expand All @@ -12017,7 +12013,7 @@ SDValue AArch64TargetLowering::LowerATOMIC_LOAD_SUB(SDValue Op,

SDValue AArch64TargetLowering::LowerATOMIC_LOAD_AND(SDValue Op,
SelectionDAG &DAG) const {
auto &Subtarget = static_cast<const AArch64Subtarget &>(DAG.getSubtarget());
auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
if (!Subtarget.hasLSE() && !Subtarget.outlineAtomics())
return SDValue();

Expand Down Expand Up @@ -14797,8 +14793,7 @@ performExtractVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
ConstantSDNode *ConstantN1 = dyn_cast<ConstantSDNode>(N1);

EVT VT = N->getValueType(0);
const bool FullFP16 =
static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasFullFP16();
const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
bool IsStrict = N0->isStrictFPOpcode();

// Rewrite for pairwise fadd pattern
Expand Down Expand Up @@ -16009,8 +16004,7 @@ static bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) {
// If we're compiling for a specific vector-length, we can check if the
// pattern's VL equals that of the scalable vector at runtime.
if (N.getOpcode() == AArch64ISD::PTRUE) {
const auto &Subtarget =
static_cast<const AArch64Subtarget &>(DAG.getSubtarget());
const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
unsigned MinSVESize = Subtarget.getMinSVEVectorSizeInBits();
unsigned MaxSVESize = Subtarget.getMaxSVEVectorSizeInBits();
if (MaxSVESize && MinSVESize == MaxSVESize) {
Expand Down Expand Up @@ -17231,8 +17225,7 @@ static bool findMoreOptimalIndexType(const MaskedGatherScatterSDNode *N,
Stride > std::numeric_limits<int32_t>::max())
return Changed;

const auto &Subtarget =
static_cast<const AArch64Subtarget &>(DAG.getSubtarget());
const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
unsigned MaxVScale =
Subtarget.getMaxSVEVectorSizeInBits() / AArch64::SVEBitsPerBlock;
int64_t LastElementOffset =
Expand Down Expand Up @@ -20219,8 +20212,7 @@ static SDValue getPredicateForFixedLengthVector(SelectionDAG &DAG, SDLoc &DL,
// For vectors that are exactly getMaxSVEVectorSizeInBits big, we can use
// AArch64SVEPredPattern::all, which can enable the use of unpredicated
// variants of instructions when available.
const auto &Subtarget =
static_cast<const AArch64Subtarget &>(DAG.getSubtarget());
const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
unsigned MinSVESize = Subtarget.getMinSVEVectorSizeInBits();
unsigned MaxSVESize = Subtarget.getMaxSVEVectorSizeInBits();
if (MaxSVESize && MinSVESize == MaxSVESize &&
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
Expand Up @@ -2308,7 +2308,7 @@ bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
if (skipFunction(Fn.getFunction()))
return false;

Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
Subtarget = &Fn.getSubtarget<AArch64Subtarget>();
TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo());
TRI = Subtarget->getRegisterInfo();
AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
Expand Down
3 changes: 1 addition & 2 deletions llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
Expand Up @@ -2335,8 +2335,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
MachineFunction &MF = *MBB.getParent();
MachineRegisterInfo &MRI = MF.getRegInfo();

const AArch64Subtarget *Subtarget =
&static_cast<const AArch64Subtarget &>(MF.getSubtarget());
const AArch64Subtarget *Subtarget = &MF.getSubtarget<AArch64Subtarget>();
if (Subtarget->requiresStrictAlign()) {
// We don't support this feature yet.
LLVM_DEBUG(dbgs() << "AArch64 GISel does not support strict-align yet\n");
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/ARM/ARMBlockPlacement.cpp
Expand Up @@ -213,7 +213,7 @@ bool ARMBlockPlacement::processPostOrderLoops(MachineLoop *ML) {
bool ARMBlockPlacement::runOnMachineFunction(MachineFunction &MF) {
if (skipFunction(MF.getFunction()))
return false;
const ARMSubtarget &ST = static_cast<const ARMSubtarget &>(MF.getSubtarget());
const ARMSubtarget &ST = MF.getSubtarget<ARMSubtarget>();
if (!ST.hasLOB())
return false;
LLVM_DEBUG(dbgs() << DEBUG_PREFIX << "Running on " << MF.getName() << "\n");
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
Expand Up @@ -396,7 +396,7 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
<< MCP->getConstants().size() << " CP entries, aligned to "
<< MCP->getConstantPoolAlign().value() << " bytes *****\n");

STI = &static_cast<const ARMSubtarget &>(MF->getSubtarget());
STI = &MF->getSubtarget<ARMSubtarget>();
TII = STI->getInstrInfo();
isPositionIndependentOrROPI =
STI->getTargetLowering()->isPositionIndependent() || STI->isROPI();
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
Expand Up @@ -3132,7 +3132,7 @@ bool ARMExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
}

bool ARMExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
STI = &static_cast<const ARMSubtarget &>(MF.getSubtarget());
STI = &MF.getSubtarget<ARMSubtarget>();
TII = STI->getInstrInfo();
TRI = STI->getRegisterInfo();
AFI = MF.getInfo<ARMFunctionInfo>();
Expand Down
5 changes: 2 additions & 3 deletions llvm/lib/Target/ARM/ARMFastISel.cpp
Expand Up @@ -122,8 +122,7 @@ class ARMFastISel final : public FastISel {
explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo)
: FastISel(funcInfo, libInfo),
Subtarget(
&static_cast<const ARMSubtarget &>(funcInfo.MF->getSubtarget())),
Subtarget(&funcInfo.MF->getSubtarget<ARMSubtarget>()),
M(const_cast<Module &>(*funcInfo.Fn->getParent())),
TM(funcInfo.MF->getTarget()), TII(*Subtarget->getInstrInfo()),
TLI(*Subtarget->getTargetLowering()) {
Expand Down Expand Up @@ -156,7 +155,7 @@ class ARMFastISel final : public FastISel {
const LoadInst *LI) override;
bool fastLowerArguments() override;

#include "ARMGenFastISel.inc"
#include "ARMGenFastISel.inc"

// Instruction selection routines.

Expand Down
5 changes: 2 additions & 3 deletions llvm/lib/Target/ARM/ARMFrameLowering.cpp
Expand Up @@ -392,8 +392,7 @@ static void emitAligningInstructions(MachineFunction &MF, ARMFunctionInfo *AFI,
const DebugLoc &DL, const unsigned Reg,
const Align Alignment,
const bool MustBeSingleInstruction) {
const ARMSubtarget &AST =
static_cast<const ARMSubtarget &>(MF.getSubtarget());
const ARMSubtarget &AST = MF.getSubtarget<ARMSubtarget>();
const bool CanUseBFC = AST.hasV6T2Ops() || AST.hasV7Ops();
const unsigned AlignMask = Alignment.value() - 1U;
const unsigned NrBitsToZero = Log2(Alignment);
Expand Down Expand Up @@ -1768,7 +1767,7 @@ checkNumAlignedDPRCS2Regs(MachineFunction &MF, BitVector &SavedRegs) {
return;

// We are planning to use NEON instructions vst1 / vld1.
if (!static_cast<const ARMSubtarget &>(MF.getSubtarget()).hasNEON())
if (!MF.getSubtarget<ARMSubtarget>().hasNEON())
return;

// Don't bother if the default stack alignment is sufficiently high.
Expand Down
8 changes: 3 additions & 5 deletions llvm/lib/Target/ARM/ARMISelLowering.cpp
Expand Up @@ -5807,8 +5807,7 @@ static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
return DAG.UnrollVectorOp(Op.getNode());
}

const bool HasFullFP16 =
static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16();
const bool HasFullFP16 = DAG.getSubtarget<ARMSubtarget>().hasFullFP16();

EVT NewTy;
const EVT OpTy = Op.getOperand(0).getValueType();
Expand Down Expand Up @@ -5918,8 +5917,7 @@ static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
Op.getOperand(0).getValueType() == MVT::v8i16) &&
"Invalid type for custom lowering!");

const bool HasFullFP16 =
static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16();
const bool HasFullFP16 = DAG.getSubtarget<ARMSubtarget>().hasFullFP16();

EVT DestVecType;
if (VT == MVT::v4f32)
Expand Down Expand Up @@ -9882,7 +9880,7 @@ ARMTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
if (N->getOpcode() != ISD::SDIV)
return SDValue();

const auto &ST = static_cast<const ARMSubtarget&>(DAG.getSubtarget());
const auto &ST = DAG.getSubtarget<ARMSubtarget>();
const bool MinSize = ST.hasMinSize();
const bool HasDivide = ST.isThumb() ? ST.hasDivideInThumbMode()
: ST.hasDivideInARMMode();
Expand Down
4 changes: 2 additions & 2 deletions llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
Expand Up @@ -2110,7 +2110,7 @@ bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
return false;

MF = &Fn;
STI = &static_cast<const ARMSubtarget &>(Fn.getSubtarget());
STI = &Fn.getSubtarget<ARMSubtarget>();
TL = STI->getTargetLowering();
AFI = Fn.getInfo<ARMFunctionInfo>();
TII = STI->getInstrInfo();
Expand Down Expand Up @@ -2201,7 +2201,7 @@ bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
return false;

TD = &Fn.getDataLayout();
STI = &static_cast<const ARMSubtarget &>(Fn.getSubtarget());
STI = &Fn.getSubtarget<ARMSubtarget>();
TII = STI->getInstrInfo();
TRI = STI->getRegisterInfo();
MRI = &Fn.getRegInfo();
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
Expand Up @@ -1299,7 +1299,7 @@ bool LowOverheadLoop::ValidateMVEInst(MachineInstr *MI) {
}

bool ARMLowOverheadLoops::runOnMachineFunction(MachineFunction &mf) {
const ARMSubtarget &ST = static_cast<const ARMSubtarget&>(mf.getSubtarget());
const ARMSubtarget &ST = mf.getSubtarget<ARMSubtarget>();
if (!ST.hasLOB())
return false;

Expand Down
3 changes: 1 addition & 2 deletions llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp
Expand Up @@ -1052,8 +1052,7 @@ bool MVETPAndVPTOptimisations::HintDoLoopStartReg(MachineBasicBlock &MBB) {
}

bool MVETPAndVPTOptimisations::runOnMachineFunction(MachineFunction &Fn) {
const ARMSubtarget &STI =
static_cast<const ARMSubtarget &>(Fn.getSubtarget());
const ARMSubtarget &STI = Fn.getSubtarget<ARMSubtarget>();

if (!STI.isThumb2() || !STI.hasLOB())
return false;
Expand Down
3 changes: 1 addition & 2 deletions llvm/lib/Target/ARM/MVEVPTBlockPass.cpp
Expand Up @@ -312,8 +312,7 @@ bool MVEVPTBlock::InsertVPTBlocks(MachineBasicBlock &Block) {
}

bool MVEVPTBlock::runOnMachineFunction(MachineFunction &Fn) {
const ARMSubtarget &STI =
static_cast<const ARMSubtarget &>(Fn.getSubtarget());
const ARMSubtarget &STI = Fn.getSubtarget<ARMSubtarget>();

if (!STI.isThumb2() || !STI.hasMVEIntegerOps())
return false;
Expand Down
3 changes: 1 addition & 2 deletions llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
Expand Up @@ -284,8 +284,7 @@ bool Thumb2ITBlock::InsertITInstructions(MachineBasicBlock &MBB) {
}

bool Thumb2ITBlock::runOnMachineFunction(MachineFunction &Fn) {
const ARMSubtarget &STI =
static_cast<const ARMSubtarget &>(Fn.getSubtarget());
const ARMSubtarget &STI = Fn.getSubtarget<ARMSubtarget>();
if (!STI.isThumb2())
return false;
AFI = Fn.getInfo<ARMFunctionInfo>();
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
Expand Up @@ -1130,7 +1130,7 @@ bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
if (PredicateFtor && !PredicateFtor(MF.getFunction()))
return false;

STI = &static_cast<const ARMSubtarget &>(MF.getSubtarget());
STI = &MF.getSubtarget<ARMSubtarget>();
if (STI->isThumb1Only() || STI->prefers32BitThumb())
return false;

Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp
Expand Up @@ -287,7 +287,7 @@ LLVM_DUMP_METHOD void CSKYConstantIslands::dumpBBs() {
bool CSKYConstantIslands::runOnMachineFunction(MachineFunction &Mf) {
MF = &Mf;
MCP = Mf.getConstantPool();
STI = &static_cast<const CSKYSubtarget &>(Mf.getSubtarget());
STI = &Mf.getSubtarget<CSKYSubtarget>();

LLVM_DEBUG(dbgs() << "***** CSKYConstantIslands: "
<< MCP->getConstants().size() << " CP entries, aligned to "
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp
Expand Up @@ -801,7 +801,7 @@ static const HexagonTargetLowering &getHexagonLowering(SelectionDAG &G) {
return static_cast<const HexagonTargetLowering&>(G.getTargetLoweringInfo());
}
static const HexagonSubtarget &getHexagonSubtarget(SelectionDAG &G) {
return static_cast<const HexagonSubtarget&>(G.getSubtarget());
return G.getSubtarget<HexagonSubtarget>();
}

namespace llvm {
Expand Down
7 changes: 3 additions & 4 deletions llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
Expand Up @@ -1396,10 +1396,9 @@ HexagonTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, Hexagon::R0, Chain, InFlag);
InFlag = Chain.getValue(1);

unsigned Flags =
static_cast<const HexagonSubtarget &>(DAG.getSubtarget()).useLongCalls()
? HexagonII::MO_GDPLT | HexagonII::HMOTF_ConstExtended
: HexagonII::MO_GDPLT;
unsigned Flags = DAG.getSubtarget<HexagonSubtarget>().useLongCalls()
? HexagonII::MO_GDPLT | HexagonII::HMOTF_ConstExtended
: HexagonII::MO_GDPLT;

return GetDynamicTLSAddr(DAG, Chain, GA, InFlag, PtrVT,
Hexagon::R0, Flags);
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/Hexagon/HexagonPatternsHVX.td
Expand Up @@ -37,7 +37,7 @@ def SDTHexagonVINSERTW0: SDTypeProfile<1, 2,
def HexagonVINSERTW0: SDNode<"HexagonISD::VINSERTW0", SDTHexagonVINSERTW0>;

def HwLen2: SDNodeXForm<imm, [{
const auto &ST = static_cast<const HexagonSubtarget&>(CurDAG->getSubtarget());
const auto &ST = CurDAG->getSubtarget<HexagonSubtarget>();
return CurDAG->getTargetConstant(ST.getVectorLength()/2, SDLoc(N), MVT::i32);
}]>;

Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/M68k/M68kCollapseMOVEMPass.cpp
Expand Up @@ -231,7 +231,7 @@ class M68kCollapseMOVEM : public MachineFunctionPass {
}

bool runOnMachineFunction(MachineFunction &MF) override {
STI = &static_cast<const M68kSubtarget &>(MF.getSubtarget());
STI = &MF.getSubtarget<M68kSubtarget>();
TII = STI->getInstrInfo();
TRI = STI->getRegisterInfo();
MFI = MF.getInfo<M68kMachineFunctionInfo>();
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/M68k/M68kExpandPseudo.cpp
Expand Up @@ -302,7 +302,7 @@ bool M68kExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
}

bool M68kExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
STI = &static_cast<const M68kSubtarget &>(MF.getSubtarget());
STI = &MF.getSubtarget<M68kSubtarget>();
TII = STI->getInstrInfo();
TRI = STI->getRegisterInfo();
MFI = MF.getInfo<M68kMachineFunctionInfo>();
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/M68k/M68kISelDAGToDAG.cpp
Expand Up @@ -312,7 +312,7 @@ class M68kDAGToDAGISel : public SelectionDAGISel {
} // namespace

bool M68kDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
Subtarget = &static_cast<const M68kSubtarget &>(MF.getSubtarget());
Subtarget = &MF.getSubtarget<M68kSubtarget>();
return SelectionDAGISel::runOnMachineFunction(MF);
}

Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/Mips/MicroMipsSizeReduction.cpp
Expand Up @@ -774,7 +774,7 @@ bool MicroMipsSizeReduce::ReplaceInstruction(MachineInstr *MI,

bool MicroMipsSizeReduce::runOnMachineFunction(MachineFunction &MF) {

Subtarget = &static_cast<const MipsSubtarget &>(MF.getSubtarget());
Subtarget = &MF.getSubtarget<MipsSubtarget>();

// TODO: Add support for the subtarget microMIPS32R6.
if (!Subtarget->inMicroMipsMode() || !Subtarget->hasMips32r2() ||
Expand Down

0 comments on commit ad73ce3

Please sign in to comment.