diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp
index c28c25fe5ac9e..2bdaddaa11761 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp
@@ -65,7 +65,7 @@ recursivelyVisitUsers(GlobalValue &GV,
       continue;

     if (Instruction *I = dyn_cast<Instruction>(U)) {
-      Function *F = I->getParent()->getParent();
+      Function *F = I->getFunction();
       if (!AMDGPU::isEntryFunctionCC(F->getCallingConv())) {
         // FIXME: This is a horrible hack. We should always respect noinline,
         // and just let us hit the error when we can't handle this.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
index 5700468e2420e..ddc675bbb8fb7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
@@ -1968,7 +1968,7 @@ class MFMASmallGemmSingleWaveOpt final : public IGLPStrategy {
       int NumBits = 0;

       auto TRI = TII->getRegisterInfo();
-      auto &MRI = MI->getParent()->getParent()->getRegInfo();
+      auto &MRI = MI->getMF()->getRegInfo();
       for (auto &Elt : Collection) {
         auto Op = Elt->getInstr()->getOperand(0);
         auto Size =
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index f5081a9d2dd56..3fbdab7ec4ed2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -1216,7 +1216,7 @@ void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(
     const SmallVectorImpl<ISD::InputArg> &Ins) const {
   const MachineFunction &MF = State.getMachineFunction();
   const Function &Fn = MF.getFunction();
-  LLVMContext &Ctx = Fn.getParent()->getContext();
+  LLVMContext &Ctx = Fn.getContext();
   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);
   const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset();
   CallingConv::ID CC = Fn.getCallingConv();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 15ed60b46a9c0..650df2a87506a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -1998,7 +1998,7 @@ bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
 }

 bool AMDGPUInstructionSelector::selectInitWholeWave(MachineInstr &MI) const {
-  MachineFunction *MF = MI.getParent()->getParent();
+  MachineFunction *MF = MI.getMF();
   SIMachineFunctionInfo *MFInfo = MF->getInfo<SIMachineFunctionInfo>();

   MFInfo->setInitWholeWave();
@@ -3690,7 +3690,7 @@ bool AMDGPUInstructionSelector::selectBVHIntersectRayIntrinsic(
       MI.getOpcode() == AMDGPU::G_AMDGPU_BVH_INTERSECT_RAY ? 1 : 3;
   MI.setDesc(TII.get(MI.getOperand(OpcodeOpIdx).getImm()));
   MI.removeOperand(OpcodeOpIdx);
-  MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
+  MI.addImplicitDefUseOperands(*MI.getMF());
   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
 }

@@ -3793,7 +3793,7 @@ bool AMDGPUInstructionSelector::selectSMFMACIntrin(MachineInstr &MI) const {
   MI.removeOperand(4); // VDst_In
   MI.removeOperand(1); // Intrinsic ID
   MI.addOperand(VDst_In); // Readd VDst_In to the end
-  MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
+  MI.addImplicitDefUseOperands(*MI.getMF());
   const MCInstrDesc &MCID = MI.getDesc();
   if (MCID.getOperandConstraint(0, MCOI::EARLY_CLOBBER) != -1) {
     MI.getOperand(0).setIsEarlyClobber(true);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
index aa755344d3325..821d7f38fcb41 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
@@ -845,7 +845,7 @@ bool AMDGPULibCalls::TDOFold(CallInst *CI, const FuncInfo &FInfo) {
         return false;
       }
     }
-    LLVMContext &context = CI->getParent()->getParent()->getContext();
+    LLVMContext &context = CI->getContext();
     Constant *nval;
     if (getArgType(FInfo) == AMDGPULibFunc::F32) {
       SmallVector<float, 0> FVal;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
index dec781d71c54e..755b44c0ca93a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
@@ -64,7 +64,7 @@ static bool lowerKernelArguments(Function &F, const TargetMachine &TM) {
     return false;

   const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
-  LLVMContext &Ctx = F.getParent()->getContext();
+  LLVMContext &Ctx = F.getContext();
   const DataLayout &DL = F.getDataLayout();
   BasicBlock &EntryBlock = *F.begin();
   IRBuilder<> Builder(&EntryBlock, getInsertPt(EntryBlock));
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelAttributes.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelAttributes.cpp
index fed7a13a69bc4..248d7dcc9ec3e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelAttributes.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelAttributes.cpp
@@ -98,7 +98,7 @@ static void annotateGridSizeLoadWithRangeMD(LoadInst *Load,
 }

 static bool processUse(CallInst *CI, bool IsV5OrAbove) {
-  Function *F = CI->getParent()->getParent();
+  Function *F = CI->getFunction();

   auto *MD = F->getMetadata("reqd_work_group_size");
   const bool HasReqdWorkGroupSize = MD && MD->getNumOperands() == 3;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp
index 844649ebb9ae6..dee3dff3bf575 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp
@@ -243,7 +243,7 @@ void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {

   int MCOpcode = TII->pseudoToMCOpcode(Opcode);
   if (MCOpcode == -1) {
-    LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext();
+    LLVMContext &C = MI->getMF()->getFunction().getContext();
     C.emitError("AMDGPUMCInstLower::lower - Pseudo instruction doesn't have "
                 "a target-specific version: " + Twine(MI->getOpcode()));
   }
@@ -332,7 +332,7 @@ void AMDGPUAsmPrinter::emitInstruction(const MachineInstr *MI) {

     StringRef Err;
     if (!STI.getInstrInfo()->verifyInstruction(*MI, Err)) {
-      LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext();
+      LLVMContext &C = MI->getMF()->getFunction().getContext();
       C.emitError("Illegal instruction detected: " + Err);
       MI->print(errs());
     }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp
index 6e54737065d20..4a70c5d6e78f6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp
@@ -177,8 +177,7 @@ void AMDGPUPreLegalizerCombinerImpl::applyClampI64ToI16(
     MachineInstr &MI, const ClampI64ToI16MatchInfo &MatchInfo) const {

   Register Src = MatchInfo.Origin;
-  assert(MI.getParent()->getParent()->getRegInfo().getType(Src) ==
-         LLT::scalar(64));
+  assert(MI.getMF()->getRegInfo().getType(Src) == LLT::scalar(64));
   const LLT S32 = LLT::scalar(32);

   auto Unmerge = B.buildUnmerge(S32, Src);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPreloadKernelArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPreloadKernelArguments.cpp
index ffbbf63969427..7d6e3edc75e1f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPreloadKernelArguments.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPreloadKernelArguments.cpp
@@ -127,7 +127,7 @@ class PreloadKernelArgInfo {
   // will also be preloaded even if that data is unused.
   Function *cloneFunctionWithPreloadImplicitArgs(unsigned LastPreloadIndex) {
     FunctionType *FT = F.getFunctionType();
-    LLVMContext &Ctx = F.getParent()->getContext();
+    LLVMContext &Ctx = F.getContext();
     SmallVector<Type *, 16> FTypes(FT->param_begin(), FT->param_end());
     for (unsigned I = 0; I <= LastPreloadIndex; ++I)
       FTypes.push_back(getHiddenArgType(Ctx, HiddenArg(I)));
@@ -196,7 +196,7 @@ class PreloadKernelArgInfo {
     SmallVector<std::pair<LoadInst *, unsigned>, 4> ImplicitArgLoads;
     for (auto *U : ImplicitArgPtr->users()) {
       Instruction *CI = dyn_cast<Instruction>(U);
-      if (!CI || CI->getParent()->getParent() != &F)
+      if (!CI || CI->getFunction() != &F)
         continue;

       for (auto *U : CI->users()) {
@@ -213,7 +213,7 @@ class PreloadKernelArgInfo {
         continue;

       // FIXME: Expand handle merged loads.
-      LLVMContext &Ctx = F.getParent()->getContext();
+      LLVMContext &Ctx = F.getContext();
       Type *LoadTy = Load->getType();
       HiddenArg HA = getHiddenArgFromOffset(Offset);
       if (HA == END_HIDDEN_ARGS || LoadTy != getHiddenArgType(Ctx, HA))
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp
index f5e14c71b02d9..416de901ef19b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp
@@ -129,7 +129,7 @@ static StringRef getAsConstantStr(Value *V) {

 static void diagnoseInvalidFormatString(const CallBase *CI) {
   CI->getContext().diagnose(DiagnosticInfoUnsupported(
-      *CI->getParent()->getParent(),
+      *CI->getFunction(),
       "printf format string must be a trivially resolved constant string "
       "global variable",
       CI->getDebugLoc()));
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index ddabd25894414..bb95265a794a0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -1378,7 +1378,7 @@ bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {
   auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
     for (const User *U : Val->users()) {
       if (const Instruction *Use = dyn_cast<Instruction>(U)) {
-        if (Use->getParent()->getParent() == &F)
+        if (Use->getFunction() == &F)
           return true;
       } else {
         const Constant *C = cast<Constant>(U);
@@ -1489,7 +1489,7 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToLDS(AllocaInst &I,
   const DataLayout &DL = Mod->getDataLayout();
   IRBuilder<> Builder(&I);

-  const Function &ContainingFunction = *I.getParent()->getParent();
+  const Function &ContainingFunction = *I.getFunction();
   CallingConv::ID CC = ContainingFunction.getCallingConv();

   // Don't promote the alloca to LDS for shader calling conventions as the work
@@ -1544,7 +1544,7 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToLDS(AllocaInst &I,

   LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");

-  Function *F = I.getParent()->getParent();
+  Function *F = I.getFunction();

   Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
   GlobalVariable *GV = new GlobalVariable(
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 90d319f578f44..7ed026ee5f69e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -468,7 +468,7 @@ RegisterBankInfo::InstructionMappings
 AMDGPURegisterBankInfo::getInstrAlternativeMappings(
     const MachineInstr &MI) const {

-  const MachineFunction &MF = *MI.getParent()->getParent();
+  const MachineFunction &MF = *MI.getMF();
   const MachineRegisterInfo &MRI = MF.getRegInfo();

@@ -2409,7 +2409,7 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
     if (DstBank == &AMDGPU::VCCRegBank)
       break;

-    MachineFunction *MF = MI.getParent()->getParent();
+    MachineFunction *MF = MI.getMF();
     ApplyRegBankMapping ApplyBank(B, *this, MRI, DstBank);
     LegalizerHelper Helper(*MF, ApplyBank, B);

@@ -2489,7 +2489,7 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
     // There is no VALU abs instruction so we need to replace it with a sub and
     // max combination.
     if (SrcBank && SrcBank == &AMDGPU::VGPRRegBank) {
-      MachineFunction *MF = MI.getParent()->getParent();
+      MachineFunction *MF = MI.getMF();
       ApplyRegBankMapping Apply(B, *this, MRI, &AMDGPU::VGPRRegBank);
       LegalizerHelper Helper(*MF, Apply, B);

@@ -3604,7 +3604,7 @@ unsigned AMDGPURegisterBankInfo::getMappingType(const MachineRegisterInfo &MRI,
 }

 bool AMDGPURegisterBankInfo::isSALUMapping(const MachineInstr &MI) const {
-  const MachineFunction &MF = *MI.getParent()->getParent();
+  const MachineFunction &MF = *MI.getMF();
   const MachineRegisterInfo &MRI = MF.getRegInfo();
   for (const MachineOperand &MO : MI.operands()) {
     if (!MO.isReg())
@@ -3620,7 +3620,7 @@ bool AMDGPURegisterBankInfo::isSALUMapping(const MachineInstr &MI) const {

 const RegisterBankInfo::InstructionMapping &
 AMDGPURegisterBankInfo::getDefaultMappingSOP(const MachineInstr &MI) const {
-  const MachineFunction &MF = *MI.getParent()->getParent();
+  const MachineFunction &MF = *MI.getMF();
   const MachineRegisterInfo &MRI = MF.getRegInfo();
   SmallVector<const ValueMapping *, 8> OpdsMapping(MI.getNumOperands());

@@ -3638,7 +3638,7 @@ AMDGPURegisterBankInfo::getDefaultMappingSOP(const MachineInstr &MI) const {

 const RegisterBankInfo::InstructionMapping &
 AMDGPURegisterBankInfo::getDefaultMappingVOP(const MachineInstr &MI) const {
-  const MachineFunction &MF = *MI.getParent()->getParent();
+  const MachineFunction &MF = *MI.getMF();
   const MachineRegisterInfo &MRI = MF.getRegInfo();
   SmallVector<const ValueMapping *, 8> OpdsMapping(MI.getNumOperands());

@@ -3662,7 +3662,7 @@ AMDGPURegisterBankInfo::getDefaultMappingVOP(const MachineInstr &MI) const {

 const RegisterBankInfo::InstructionMapping &
 AMDGPURegisterBankInfo::getDefaultMappingAllVGPR(const MachineInstr &MI) const {
-  const MachineFunction &MF = *MI.getParent()->getParent();
+  const MachineFunction &MF = *MI.getMF();
   const MachineRegisterInfo &MRI = MF.getRegInfo();
   SmallVector<const ValueMapping *, 8> OpdsMapping(MI.getNumOperands());

@@ -3741,7 +3741,7 @@ AMDGPURegisterBankInfo::getValueMappingForPtr(const MachineRegisterInfo &MRI,

 const RegisterBankInfo::InstructionMapping &
 AMDGPURegisterBankInfo::getInstrMappingForLoad(const MachineInstr &MI) const {
-  const MachineFunction &MF = *MI.getParent()->getParent();
+  const MachineFunction &MF = *MI.getMF();
   const MachineRegisterInfo &MRI = MF.getRegInfo();
   SmallVector<const ValueMapping *, 2> OpdsMapping(2);
   unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
@@ -3831,7 +3831,7 @@ AMDGPURegisterBankInfo::getAGPROpMapping(Register Reg,
 //
 const RegisterBankInfo::InstructionMapping &
 AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
-  const MachineFunction &MF = *MI.getParent()->getParent();
+  const MachineFunction &MF = *MI.getMF();
   const MachineRegisterInfo &MRI = MF.getRegInfo();

   if (MI.isCopy() || MI.getOpcode() == AMDGPU::G_FREEZE) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
index 4b1f80c777827..a2e16c7f873f7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
@@ -299,7 +299,7 @@ bool AMDGPURewriteOutArguments::runOnFunction(Function &F) {
   if (Replacements.empty())
     return false;

-  LLVMContext &Ctx = F.getParent()->getContext();
+  LLVMContext &Ctx = F.getContext();
   StructType *NewRetTy = StructType::create(Ctx, ReturnTypes, F.getName());

   FunctionType *NewFuncTy = FunctionType::get(NewRetTy,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
index 26e0b3dfc2e8a..c7528f993da1e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
@@ -282,7 +282,7 @@ bool AMDGPUSubtarget::isSingleLaneExecution(const Function &Func) const {
 }

 bool AMDGPUSubtarget::makeLIDRangeMetadata(Instruction *I) const {
-  Function *Kernel = I->getParent()->getParent();
+  Function *Kernel = I->getFunction();
   unsigned MinSize = 0;
   unsigned MaxSize = getFlatWorkGroupSizes(*Kernel).second;
   bool IdQuery = false;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
index 4a9437b37aa39..8695a25b10227 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
@@ -523,7 +523,7 @@ static void replacesUsesOfGlobalInFunction(Function *Func, GlobalVariable *GV,
   auto ReplaceUsesLambda = [Func](const Use &U) -> bool {
     auto *V = U.getUser();
     if (auto *Inst = dyn_cast<Instruction>(V)) {
-      auto *Func1 = Inst->getParent()->getParent();
+      auto *Func1 = Inst->getFunction();
       if (Func == Func1)
         return true;
     }
diff --git a/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp b/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
index 4e11c4ff3d56e..60956f7bac7c8 100644
--- a/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
@@ -580,7 +580,7 @@ void GCNUpwardRPTracker::recede(const MachineInstr &MI) {

 bool GCNDownwardRPTracker::reset(const MachineInstr &MI,
                                  const LiveRegSet *LiveRegsCopy) {
-  MRI = &MI.getParent()->getParent()->getRegInfo();
+  MRI = &MI.getMF()->getRegInfo();
   LastTrackedMI = nullptr;
   MBBEnd = MI.getParent()->end();
   NextMI = &MI;
diff --git a/llvm/lib/Target/AMDGPU/GCNRegPressure.h b/llvm/lib/Target/AMDGPU/GCNRegPressure.h
index 4b22c68ef01c5..f54874d2a5b40 100644
--- a/llvm/lib/Target/AMDGPU/GCNRegPressure.h
+++ b/llvm/lib/Target/AMDGPU/GCNRegPressure.h
@@ -463,7 +463,7 @@ getLiveRegMap(Range &&R, bool After, LiveIntervals &LIS) {
   }
   llvm::sort(Indexes);

-  auto &MRI = (*R.begin())->getParent()->getParent()->getRegInfo();
+  auto &MRI = (*R.begin())->getMF()->getRegInfo();
   DenseMap<MachineInstr *, GCNRPTracker::LiveRegSet> LiveRegMap;
   SmallVector<SlotIndex, 32> LiveIdxs, SRLiveIdxs;
   for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
@@ -493,13 +493,13 @@ getLiveRegMap(Range &&R, bool After, LiveIntervals &LIS) {

 inline GCNRPTracker::LiveRegSet getLiveRegsAfter(const MachineInstr &MI,
                                                  const LiveIntervals &LIS) {
   return getLiveRegs(LIS.getInstructionIndex(MI).getDeadSlot(), LIS,
-                     MI.getParent()->getParent()->getRegInfo());
+                     MI.getMF()->getRegInfo());
 }

 inline GCNRPTracker::LiveRegSet getLiveRegsBefore(const MachineInstr &MI,
                                                   const LiveIntervals &LIS) {
   return getLiveRegs(LIS.getInstructionIndex(MI).getBaseIndex(), LIS,
-                     MI.getParent()->getParent()->getRegInfo());
+                     MI.getMF()->getRegInfo());
 }

 template <typename Range>
diff --git a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
index 01040854e1577..7f805e67c62ec 100644
--- a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
@@ -176,7 +176,7 @@ bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
 }

 bool R600InstrInfo::usesVertexCache(const MachineInstr &MI) const {
-  const MachineFunction *MF = MI.getParent()->getParent();
+  const MachineFunction *MF = MI.getMF();
   return !AMDGPU::isCompute(MF->getFunction().getCallingConv()) &&
          usesVertexCache(MI.getOpcode());
 }
@@ -186,7 +186,7 @@ bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
 }

 bool R600InstrInfo::usesTextureCache(const MachineInstr &MI) const {
-  const MachineFunction *MF = MI.getParent()->getParent();
+  const MachineFunction *MF = MI.getMF();
   return (AMDGPU::isCompute(MF->getFunction().getCallingConv()) &&
           usesVertexCache(MI.getOpcode())) ||
          usesTextureCache(MI.getOpcode());
@@ -948,7 +948,7 @@ bool R600InstrInfo::PredicateInstruction(MachineInstr &MI,
         .setReg(Pred[2].getReg());
     MI.getOperand(getOperandIdx(MI, R600::OpName::pred_sel_W))
         .setReg(Pred[2].getReg());
-    MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
+    MachineInstrBuilder MIB(*MI.getMF(), MI);
     MIB.addReg(R600::PREDICATE_BIT, RegState::Implicit);
     return true;
   }
@@ -956,7 +956,7 @@ bool R600InstrInfo::PredicateInstruction(MachineInstr &MI,
   if (PIdx != -1) {
     MachineOperand &PMO = MI.getOperand(PIdx);
     PMO.setReg(Pred[2].getReg());
-    MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
+    MachineInstrBuilder MIB(*MI.getMF(), MI);
     MIB.addReg(R600::PREDICATE_BIT, RegState::Implicit);
     return true;
   }
diff --git a/llvm/lib/Target/AMDGPU/R600MCInstLower.cpp b/llvm/lib/Target/AMDGPU/R600MCInstLower.cpp
index 48b4e7f0d07be..ac6508c2322ce 100644
--- a/llvm/lib/Target/AMDGPU/R600MCInstLower.cpp
+++ b/llvm/lib/Target/AMDGPU/R600MCInstLower.cpp
@@ -55,7 +55,7 @@ void R600AsmPrinter::emitInstruction(const MachineInstr *MI) {

   StringRef Err;
   if (!STI.getInstrInfo()->verifyInstruction(*MI, Err)) {
-    LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext();
+    LLVMContext &C = MI->getMF()->getFunction().getContext();
     C.emitError("Illegal instruction detected: " + Err);
     MI->print(errs());
   }
diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index 7793907c032d2..9a8710becba39 100644
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -238,7 +238,7 @@ static bool isSGPRToVGPRCopy(const TargetRegisterClass *SrcRC,
 static bool tryChangeVGPRtoSGPRinCopy(MachineInstr &MI,
                                       const SIRegisterInfo *TRI,
                                       const SIInstrInfo *TII) {
-  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+  MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
   auto &Src = MI.getOperand(1);
   Register DstReg = MI.getOperand(0).getReg();
   Register SrcReg = Src.getReg();
@@ -930,7 +930,7 @@ bool SIFixSGPRCopies::lowerSpecialCase(MachineInstr &MI,
     // s_mov_b32.
     if (isSafeToFoldImmIntoCopy(&MI, MRI->getVRegDef(SrcReg), TII, SMovOp, Imm)) {
       MI.getOperand(1).ChangeToImmediate(Imm);
-      MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
+      MI.addImplicitDefUseOperands(*MI.getMF());
       MI.setDesc(TII->get(SMovOp));
       return true;
     }
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 2c00e23d113cb..f4d38c0c3d8c3 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -1327,7 +1327,7 @@ void SIFoldOperandsImpl::foldOperand(
       if (MovOp == AMDGPU::V_MOV_B16_t16_e64) {
         const auto &SrcOp = UseMI->getOperand(UseOpIdx);
         MachineOperand NewSrcOp(SrcOp);
-        MachineFunction *MF = UseMI->getParent()->getParent();
+        MachineFunction *MF = UseMI->getMF();
         UseMI->removeOperand(1);
         UseMI->addOperand(*MF, MachineOperand::CreateImm(0)); // src0_modifiers
         UseMI->addOperand(NewSrcOp); // src0
@@ -1780,7 +1780,7 @@ bool SIFoldOperandsImpl::foldInstOperand(MachineInstr &MI,
   if (CopiesToReplace.empty() && FoldList.empty())
     return Changed;

-  MachineFunction *MF = MI.getParent()->getParent();
+  MachineFunction *MF = MI.getMF();
   // Make sure we add EXEC uses to any new v_mov instructions created.
   for (MachineInstr *Copy : CopiesToReplace)
     Copy->addImplicitDefUseOperands(*MF);
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 768c0abd2e3f1..3de6f677f281f 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -4052,7 +4052,7 @@ bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
   if (!CI->isTailCall())
     return false;

-  const Function *ParentFn = CI->getParent()->getParent();
+  const Function *ParentFn = CI->getFunction();
   if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
     return false;
   return true;
@@ -17422,7 +17422,7 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                      SDNode *Node) const {
   const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

-  MachineFunction *MF = MI.getParent()->getParent();
+  MachineFunction *MF = MI.getMF();
   MachineRegisterInfo &MRI = MF->getRegInfo();

   if (TII->isVOP3(MI.getOpcode())) {
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 00a5a27dc7c93..c7c68d2632c17 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -162,7 +162,7 @@ bool SIInstrInfo::resultDependsOnExec(const MachineInstr &MI) const {
   if (!DstReg.isVirtual())
     return true;

-  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
   for (MachineInstr &Use : MRI.use_nodbg_instructions(DstReg)) {
     switch (Use.getOpcode()) {
     case AMDGPU::S_AND_SAVEEXEC_B32:
@@ -3984,7 +3984,7 @@ static bool getFoldableImm(const MachineOperand *MO, int64_t &Imm,
                            MachineInstr **DefMI = nullptr) {
   if (!MO->isReg())
     return false;
-  const MachineFunction *MF = MO->getParent()->getParent()->getParent();
+  const MachineFunction *MF = MO->getParent()->getMF();
   const MachineRegisterInfo &MRI = MF->getRegInfo();
   return getFoldableImm(MO->getReg(), MRI, Imm, DefMI);
 }
@@ -4999,7 +4999,7 @@ bool SIInstrInfo::verifyCopy(const MachineInstr &MI,
 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
                                     StringRef &ErrInfo) const {
   uint16_t Opcode = MI.getOpcode();
-  const MachineFunction *MF = MI.getParent()->getParent();
+  const MachineFunction *MF = MI.getMF();
   const MachineRegisterInfo &MRI = MF->getRegInfo();

   // FIXME: At this point the COPY verify is done only for non-ssa forms.
@@ -5805,7 +5805,7 @@ unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const {
   case AMDGPU::STRICT_WWM: return AMDGPU::STRICT_WWM;
   case AMDGPU::STRICT_WQM: return AMDGPU::STRICT_WQM;
   case AMDGPU::S_MOV_B32: {
-    const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+    const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
     return MI.getOperand(1).isReg() ||
            RI.isAGPR(MRI, MI.getOperand(0).getReg()) ?
            AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
@@ -6080,8 +6080,7 @@ const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
     Register Reg = MI.getOperand(OpNo).getReg();
     if (Reg.isVirtual()) {
-      const MachineRegisterInfo &MRI =
-          MI.getParent()->getParent()->getRegInfo();
+      const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
       return MRI.getRegClass(Reg);
     }
     return RI.getPhysRegBaseClass(Reg);
@@ -6172,7 +6171,7 @@ bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI,
   const TargetRegisterClass *RC = MRI.getRegClass(Reg);

   if (MO.getSubReg()) {
-    const MachineFunction *MF = MO.getParent()->getParent()->getParent();
+    const MachineFunction *MF = MO.getParent()->getMF();
     const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF);
     if (!SuperRC)
       return false;
@@ -6184,7 +6183,7 @@ bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI,

 bool SIInstrInfo::isLegalRegOperand(const MachineInstr &MI, unsigned OpIdx,
                                     const MachineOperand &MO) const {
-  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
   const MCOperandInfo OpInfo = MI.getDesc().operands()[OpIdx];
   unsigned Opc = MI.getOpcode();

@@ -6286,7 +6285,7 @@ bool SIInstrInfo::isLegalGFX12PlusPackedMathFP32Operand(

 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
                                  const MachineOperand *MO) const {
-  const MachineFunction &MF = *MI.getParent()->getParent();
+  const MachineFunction &MF = *MI.getMF();
   const MachineRegisterInfo &MRI = MF.getRegInfo();
   const MCInstrDesc &InstDesc = MI.getDesc();
   const MCOperandInfo &OpInfo = InstDesc.operands()[OpIdx];
@@ -7182,7 +7181,7 @@ extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) {
 MachineBasicBlock *
 SIInstrInfo::legalizeOperands(MachineInstr &MI,
                               MachineDominatorTree *MDT) const {
-  MachineFunction &MF = *MI.getParent()->getParent();
+  MachineFunction &MF = *MI.getMF();
   MachineRegisterInfo &MRI = MF.getRegInfo();
   MachineBasicBlock *CreatedBB = nullptr;

@@ -9278,7 +9277,7 @@ void SIInstrInfo::addSCCDefUsersToVALUWorklist(const MachineOperand &Op,
   int SCCIdx = MI.findRegisterUseOperandIdx(AMDGPU::SCC, &RI, false);
   if (SCCIdx != -1) {
     if (MI.isCopy()) {
-      MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+      MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
       Register DestReg = MI.getOperand(0).getReg();

       MRI.replaceRegWith(DestReg, NewCond);
@@ -9390,7 +9389,7 @@ Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
     return SGPRReg;

   Register UsedSGPRs[3] = {Register()};
-  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();

   for (unsigned i = 0; i < 3; ++i) {
     int Idx = OpIndices[i];
@@ -9640,7 +9639,7 @@ unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
     return getInstBundleSize(MI);
   case TargetOpcode::INLINEASM:
   case TargetOpcode::INLINEASM_BR: {
-    const MachineFunction *MF = MI.getParent()->getParent();
+    const MachineFunction *MF = MI.getMF();
     const char *AsmStr = MI.getOperand(0).getSymbolName();
     return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo(), &ST);
   }
@@ -9775,7 +9774,7 @@ bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI,
   // needed by the prolog. However, the insertions for scalar registers can
   // always be placed at the BB top as they are independent of the exec mask
   // value.
-  const MachineFunction *MF = MI.getParent()->getParent();
+  const MachineFunction *MF = MI.getMF();
   bool IsNullOrVectorRegister = true;
   if (Reg) {
     const MachineRegisterInfo &MRI = MF->getRegInfo();
@@ -10562,7 +10561,7 @@ SIInstrInfo::getInstructionUniformity(const MachineInstr &MI) const {
     return InstructionUniformity::Default;
   }

-  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
   const AMDGPURegisterBankInfo *RBI = ST.getRegBankInfo();

   // FIXME: It's conceptually broken to report this for an instruction, and not
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 2ecd94186e1e0..2d6b3467c86a2 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -1171,13 +1171,13 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
   bool isVGPRCopy(const MachineInstr &MI) const {
     assert(isCopyInstr(MI));
     Register Dest = MI.getOperand(0).getReg();
-    const MachineFunction &MF = *MI.getParent()->getParent();
+    const MachineFunction &MF = *MI.getMF();
     const MachineRegisterInfo &MRI = MF.getRegInfo();
     return !RI.isSGPRReg(MRI, Dest);
   }

   bool hasVGPRUses(const MachineInstr &MI) const {
-    const MachineFunction &MF = *MI.getParent()->getParent();
+    const MachineFunction &MF = *MI.getMF();
     const MachineRegisterInfo &MRI = MF.getRegInfo();
     return llvm::any_of(MI.explicit_uses(),
                         [&MRI, this](const MachineOperand &MO) {
diff --git a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
index 6ab8d5521ebdb..0dac6d2d7ee4b 100644
--- a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
@@ -777,7 +777,7 @@ getSynchronizeAddrSpaceMD(const MachineInstr &MI) {

 void SIMemOpAccess::reportUnsupported(const MachineBasicBlock::iterator &MI,
                                       const char *Msg) const {
-  const Function &Func = MI->getParent()->getParent()->getFunction();
+  const Function &Func = MI->getMF()->getFunction();
   Func.getContext().diagnose(
       DiagnosticInfoUnsupported(Func, Msg, MI->getDebugLoc()));
 }
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 86ca22cfeffd8..acc4b3f0a68b4 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -118,7 +118,7 @@ class SDWAOperand {
   MachineInstr *getParentInst() const { return Target->getParent(); }

   MachineRegisterInfo *getMRI() const {
-    return &getParentInst()->getParent()->getParent()->getRegInfo();
+    return &getParentInst()->getMF()->getRegInfo();
   }

 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -1284,7 +1284,7 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
     // Clone the instruction to allow revoking changes
     // made to MI during the processing of the operands
     // if the conversion fails.
-    SDWAInst = MI.getParent()->getParent()->CloneMachineInstr(&MI);
+    SDWAInst = MI.getMF()->CloneMachineInstr(&MI);
     MI.getParent()->insert(MI.getIterator(), SDWAInst);
   } else {
     SDWAInst = createSDWAVersion(MI);
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index ecf3aee6048cd..840c1bdde173b 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -1949,7 +1949,7 @@ void SIRegisterInfo::buildSpillLoadStore(

 void SIRegisterInfo::addImplicitUsesForBlockCSRLoad(MachineInstrBuilder &MIB,
                                                     Register BlockReg) const {
-  const MachineFunction *MF = MIB->getParent()->getParent();
+  const MachineFunction *MF = MIB->getMF();
   const SIMachineFunctionInfo *FuncInfo = MF->getInfo<SIMachineFunctionInfo>();
   uint32_t Mask = FuncInfo->getMaskForVGPRBlockOps(BlockReg);
   Register BaseVGPR = getSubReg(BlockReg, AMDGPU::sub0);
@@ -2319,7 +2319,7 @@ bool SIRegisterInfo::eliminateSGPRToVGPRSpillFrameIndex(

 bool SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                          int SPAdj, unsigned FIOperandNum,
                                          RegScavenger *RS) const {
-  MachineFunction *MF = MI->getParent()->getParent();
+  MachineFunction *MF = MI->getMF();
   MachineBasicBlock *MBB = MI->getParent();
   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
   MachineFrameInfo &FrameInfo = MF->getFrameInfo();
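Every hunk in this patch is the same mechanical rewrite: a hand-written getParent() chain is replaced by a direct accessor the class already provides (MachineInstr::getMF(), Instruction::getFunction(), Value::getContext()). A minimal sketch of the equivalences follows; the wrapper function names are hypothetical, for illustration only, and are not part of the patch:

    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/LLVMContext.h"

    using namespace llvm;

    // MachineInstr -> MachineBasicBlock -> MachineFunction, in one call.
    static const MachineFunction *owningMF(const MachineInstr &MI) {
      // Equivalent to MI.getParent()->getParent().
      return MI.getMF();
    }

    // Instruction -> BasicBlock -> Function, in one call.
    static const Function *owningFunction(const Instruction &I) {
      // Equivalent to I.getParent()->getParent().
      return I.getFunction();
    }

    // Value::getContext() reaches the LLVMContext directly, so the
    // Function -> Module -> LLVMContext walk is unnecessary.
    static LLVMContext &contextOf(const Function &F) {
      // Equivalent to F.getParent()->getContext().
      return F.getContext();
    }

The same reasoning covers the one MachineOperand case in SIInstrInfo.cpp: MachineOperand::getParent() yields the owning MachineInstr, so MO->getParent()->getMF() replaces the triple getParent() chain.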