diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp index a27febe15db83..34fa1f5a7ed1f 100644 --- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp @@ -495,7 +495,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node, // EXTRACT_SUBREG is lowered as %dst = COPY %src:sub. There are no // constraints on the %dst register, COPY can target all legal register // classes. - unsigned SubIdx = cast(Node->getOperand(1))->getZExtValue(); + unsigned SubIdx = Node->getConstantOperandVal(1); const TargetRegisterClass *TRC = TLI->getRegClassFor(Node->getSimpleValueType(0), Node->isDivergent()); @@ -611,7 +611,7 @@ InstrEmitter::EmitCopyToRegClassNode(SDNode *Node, unsigned VReg = getVR(Node->getOperand(0), VRBaseMap); // Create the new VReg in the destination class and emit a copy. - unsigned DstRCIdx = cast(Node->getOperand(1))->getZExtValue(); + unsigned DstRCIdx = Node->getConstantOperandVal(1); const TargetRegisterClass *DstRC = TRI->getAllocatableClass(TRI->getRegClass(DstRCIdx)); Register NewVReg = MRI->createVirtualRegister(DstRC); @@ -629,7 +629,7 @@ InstrEmitter::EmitCopyToRegClassNode(SDNode *Node, void InstrEmitter::EmitRegSequence(SDNode *Node, DenseMap &VRBaseMap, bool IsClone, bool IsCloned) { - unsigned DstRCIdx = cast(Node->getOperand(0))->getZExtValue(); + unsigned DstRCIdx = Node->getConstantOperandVal(0); const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx); Register NewVReg = MRI->createVirtualRegister(TRI->getAllocatableClass(RC)); const MCInstrDesc &II = TII->get(TargetOpcode::REG_SEQUENCE); @@ -1309,8 +1309,7 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned, // Add all of the operand registers to the instruction. 
for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) { - unsigned Flags = - cast(Node->getOperand(i))->getZExtValue(); + unsigned Flags = Node->getConstantOperandVal(i); const InlineAsm::Flag F(Flags); const unsigned NumVals = F.getNumOperandRegisters(); diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp index f73ddfee2b90f..e3acb58327a8c 100644 --- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp @@ -492,8 +492,7 @@ bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU, --NumOps; // Ignore the glue operand. for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) { - unsigned Flags = - cast(Node->getOperand(i))->getZExtValue(); + unsigned Flags = Node->getConstantOperandVal(i); const InlineAsm::Flag F(Flags); unsigned NumVals = F.getNumOperandRegisters(); diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp index 47c137d2bcad7..dcecb2e0e7faf 100644 --- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp @@ -331,7 +331,7 @@ static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos, unsigned Opcode = Node->getMachineOpcode(); if (Opcode == TargetOpcode::REG_SEQUENCE) { - unsigned DstRCIdx = cast(Node->getOperand(0))->getZExtValue(); + unsigned DstRCIdx = Node->getConstantOperandVal(0); const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx); RegClass = RC->getID(); Cost = RegSequenceCost; @@ -1369,8 +1369,7 @@ DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl &LRegs) { --NumOps; // Ignore the glue operand. 
for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) { - unsigned Flags = - cast(Node->getOperand(i))->getZExtValue(); + unsigned Flags = Node->getConstantOperandVal(i); const InlineAsm::Flag F(Flags); unsigned NumVals = F.getNumOperandRegisters(); @@ -2298,8 +2297,7 @@ void RegReductionPQBase::unscheduledNode(SUnit *SU) { continue; } if (POpc == TargetOpcode::REG_SEQUENCE) { - unsigned DstRCIdx = - cast(PN->getOperand(0))->getZExtValue(); + unsigned DstRCIdx = PN->getConstantOperandVal(0); const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx); unsigned RCId = RC->getID(); // REG_SEQUENCE is untyped, so getRepRegClassCostFor could not be used diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 81facf92e55ae..eb4deb6306fd5 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -7408,7 +7408,7 @@ static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) { Src.getOperand(0).getOpcode() == ISD::GlobalAddress && Src.getOperand(1).getOpcode() == ISD::Constant) { G = cast(Src.getOperand(0)); - SrcDelta = cast(Src.getOperand(1))->getZExtValue(); + SrcDelta = Src.getConstantOperandVal(1); } if (!G) return false; diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index 3dc6e4bbcf46b..f28211ac113ca 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -4181,8 +4181,7 @@ void SelectionDAGISel::CannotYetSelect(SDNode *N) { Msg << "\nIn function: " << MF->getName(); } else { bool HasInputChain = N->getOperand(0).getValueType() == MVT::Other; - unsigned iid = - cast(N->getOperand(HasInputChain))->getZExtValue(); + unsigned iid = N->getConstantOperandVal(HasInputChain); if (iid < Intrinsic::num_intrinsics) Msg << "intrinsic %" << Intrinsic::getBaseName((Intrinsic::ID)iid); else if (const 
TargetIntrinsicInfo *TII = TM.getIntrinsicInfo()) diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp index 463ec41b94e97..476d99c2a7e04 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -1950,7 +1950,7 @@ void AArch64DAGToDAGISel::SelectMultiVectorMove(SDNode *N, unsigned NumVecs, unsigned BaseReg, unsigned Op) { unsigned TileNum = 0; if (BaseReg != AArch64::ZA) - TileNum = cast(N->getOperand(2))->getZExtValue(); + TileNum = N->getConstantOperandVal(2); if (!SelectSMETile(BaseReg, TileNum)) return; @@ -2145,8 +2145,7 @@ void AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs, const EVT ResTys[] = {MVT::Untyped, MVT::Other}; - unsigned LaneNo = - cast(N->getOperand(NumVecs + 2))->getZExtValue(); + unsigned LaneNo = N->getConstantOperandVal(NumVecs + 2); SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64), N->getOperand(NumVecs + 3), N->getOperand(0)}; @@ -2185,8 +2184,7 @@ void AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs, const EVT ResTys[] = {MVT::i64, // Type of the write back register RegSeq->getValueType(0), MVT::Other}; - unsigned LaneNo = - cast(N->getOperand(NumVecs + 1))->getZExtValue(); + unsigned LaneNo = N->getConstantOperandVal(NumVecs + 1); SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, @@ -2237,8 +2235,7 @@ void AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs, SDValue RegSeq = createQTuple(Regs); - unsigned LaneNo = - cast(N->getOperand(NumVecs + 2))->getZExtValue(); + unsigned LaneNo = N->getConstantOperandVal(NumVecs + 2); SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64), N->getOperand(NumVecs + 3), N->getOperand(0)}; @@ -2269,8 +2266,7 @@ void AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs, const EVT ResTys[] = {MVT::i64, // Type of the write back register MVT::Other}; - unsigned LaneNo = 
- cast(N->getOperand(NumVecs + 1))->getZExtValue(); + unsigned LaneNo = N->getConstantOperandVal(NumVecs + 1); SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64), N->getOperand(NumVecs + 2), // Base Register @@ -2576,8 +2572,8 @@ static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc, case AArch64::UBFMXri: Opc = NOpc; Opd0 = N->getOperand(0); - Immr = cast(N->getOperand(1).getNode())->getZExtValue(); - Imms = cast(N->getOperand(2).getNode())->getZExtValue(); + Immr = N->getConstantOperandVal(1); + Imms = N->getConstantOperandVal(2); return true; } // Unreachable @@ -3877,7 +3873,7 @@ bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) { assert(isa(N->getOperand(2)) && "Expected a constant integer expression."); unsigned Reg = PMapper->Encoding; - uint64_t Immed = cast(N->getOperand(2))->getZExtValue(); + uint64_t Immed = N->getConstantOperandVal(2); CurDAG->SelectNodeTo( N, State, MVT::Other, CurDAG->getTargetConstant(Reg, DL, MVT::i32), CurDAG->getTargetConstant(Immed, DL, MVT::i16), N->getOperand(0)); @@ -4173,8 +4169,7 @@ bool AArch64DAGToDAGISel::trySelectStackSlotTagP(SDNode *N) { SDValue IRG_SP = N->getOperand(2); if (IRG_SP->getOpcode() != ISD::INTRINSIC_W_CHAIN || - cast(IRG_SP->getOperand(1))->getZExtValue() != - Intrinsic::aarch64_irg_sp) { + IRG_SP->getConstantOperandVal(1) != Intrinsic::aarch64_irg_sp) { return false; } @@ -4183,7 +4178,7 @@ bool AArch64DAGToDAGISel::trySelectStackSlotTagP(SDNode *N) { int FI = cast(N->getOperand(1))->getIndex(); SDValue FiOp = CurDAG->getTargetFrameIndex( FI, TLI->getPointerTy(CurDAG->getDataLayout())); - int TagOffset = cast(N->getOperand(3))->getZExtValue(); + int TagOffset = N->getConstantOperandVal(3); SDNode *Out = CurDAG->getMachineNode( AArch64::TAGPstack, DL, MVT::i64, @@ -4203,7 +4198,7 @@ void AArch64DAGToDAGISel::SelectTagP(SDNode *N) { // General case for unrelated pointers in Op1 and Op2. 
SDLoc DL(N); - int TagOffset = cast(N->getOperand(3))->getZExtValue(); + int TagOffset = N->getConstantOperandVal(3); SDNode *N1 = CurDAG->getMachineNode(AArch64::SUBP, DL, MVT::i64, {N->getOperand(1), N->getOperand(2)}); SDNode *N2 = CurDAG->getMachineNode(AArch64::ADDXrr, DL, MVT::i64, @@ -4219,7 +4214,7 @@ bool AArch64DAGToDAGISel::trySelectCastFixedLengthToScalableVector(SDNode *N) { assert(N->getOpcode() == ISD::INSERT_SUBVECTOR && "Invalid Node!"); // Bail when not a "cast" like insert_subvector. - if (cast(N->getOperand(2))->getZExtValue() != 0) + if (N->getConstantOperandVal(2) != 0) return false; if (!N->getOperand(0).isUndef()) return false; @@ -4250,7 +4245,7 @@ bool AArch64DAGToDAGISel::trySelectCastScalableToFixedLengthVector(SDNode *N) { assert(N->getOpcode() == ISD::EXTRACT_SUBVECTOR && "Invalid Node!"); // Bail when not a "cast" like extract_subvector. - if (cast(N->getOperand(1))->getZExtValue() != 0) + if (N->getConstantOperandVal(1) != 0) return false; // Bail when normal isel can do the job. 
@@ -4422,7 +4417,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) { return; } case ISD::INTRINSIC_W_CHAIN: { - unsigned IntNo = cast(Node->getOperand(1))->getZExtValue(); + unsigned IntNo = Node->getConstantOperandVal(1); switch (IntNo) { default: break; @@ -5179,7 +5174,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) { } } break; case ISD::INTRINSIC_WO_CHAIN: { - unsigned IntNo = cast(Node->getOperand(0))->getZExtValue(); + unsigned IntNo = Node->getConstantOperandVal(0); switch (IntNo) { default: break; @@ -5782,7 +5777,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) { break; } case ISD::INTRINSIC_VOID: { - unsigned IntNo = cast(Node->getOperand(1))->getZExtValue(); + unsigned IntNo = Node->getConstantOperandVal(1); if (Node->getNumOperands() >= 3) VT = Node->getOperand(2)->getValueType(0); switch (IntNo) { @@ -6806,7 +6801,7 @@ static EVT getMemVTFromNode(LLVMContext &Ctx, SDNode *Root) { if (Opcode != ISD::INTRINSIC_VOID && Opcode != ISD::INTRINSIC_W_CHAIN) return EVT(); - switch (cast(Root->getOperand(1))->getZExtValue()) { + switch (Root->getConstantOperandVal(1)) { default: return EVT(); case Intrinsic::aarch64_sme_ldr: diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index dffe69bdb900d..c000f33e2c541 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -2196,7 +2196,7 @@ void AArch64TargetLowering::computeKnownBitsForTargetNode( } case ISD::INTRINSIC_WO_CHAIN: case ISD::INTRINSIC_VOID: { - unsigned IntNo = cast(Op.getOperand(0))->getZExtValue(); + unsigned IntNo = Op.getConstantOperandVal(0); switch (IntNo) { default: break; @@ -3922,9 +3922,9 @@ static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) { // 4: bool isDataCache static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) { SDLoc DL(Op); - unsigned IsWrite = cast(Op.getOperand(2))->getZExtValue(); - unsigned Locality = 
cast(Op.getOperand(3))->getZExtValue(); - unsigned IsData = cast(Op.getOperand(4))->getZExtValue(); + unsigned IsWrite = Op.getConstantOperandVal(2); + unsigned Locality = Op.getConstantOperandVal(3); + unsigned IsData = Op.getConstantOperandVal(4); bool IsStream = !Locality; // When the locality number is set @@ -4973,10 +4973,10 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_VOID(SDValue Op, SDValue Chain = Op.getOperand(0); SDValue Addr = Op.getOperand(2); - unsigned IsWrite = cast(Op.getOperand(3))->getZExtValue(); - unsigned Locality = cast(Op.getOperand(4))->getZExtValue(); - unsigned IsStream = cast(Op.getOperand(5))->getZExtValue(); - unsigned IsData = cast(Op.getOperand(6))->getZExtValue(); + unsigned IsWrite = Op.getConstantOperandVal(3); + unsigned Locality = Op.getConstantOperandVal(4); + unsigned IsStream = Op.getConstantOperandVal(5); + unsigned IsData = Op.getConstantOperandVal(6); unsigned PrfOp = (IsWrite << 4) | // Load/Store bit (!IsData << 3) | // IsDataCache bit (Locality << 1) | // Cache level bits @@ -5039,7 +5039,7 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const { - unsigned IntNo = cast(Op.getOperand(0))->getZExtValue(); + unsigned IntNo = Op.getConstantOperandVal(0); SDLoc dl(Op); switch (IntNo) { default: return SDValue(); // Don't custom lower most intrinsics. 
@@ -5218,8 +5218,7 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, return DAG.getNode(AArch64ISD::SPLICE, dl, Op.getValueType(), Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); case Intrinsic::aarch64_sve_ptrue: - return getPTrue(DAG, dl, Op.getValueType(), - cast(Op.getOperand(1))->getZExtValue()); + return getPTrue(DAG, dl, Op.getValueType(), Op.getConstantOperandVal(1)); case Intrinsic::aarch64_sve_clz: return DAG.getNode(AArch64ISD::CTLZ_MERGE_PASSTHRU, dl, Op.getValueType(), Op.getOperand(2), Op.getOperand(3), Op.getOperand(1)); @@ -6478,7 +6477,7 @@ static unsigned getIntrinsicID(const SDNode *N) { default: return Intrinsic::not_intrinsic; case ISD::INTRINSIC_WO_CHAIN: { - unsigned IID = cast(N->getOperand(0))->getZExtValue(); + unsigned IID = N->getConstantOperandVal(0); if (IID < Intrinsic::num_intrinsics) return IID; return Intrinsic::not_intrinsic; @@ -10009,7 +10008,7 @@ SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op, EVT VT = Op.getValueType(); SDLoc DL(Op); - unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); + unsigned Depth = Op.getConstantOperandVal(0); SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::FP, MVT::i64); while (Depth--) @@ -10076,7 +10075,7 @@ SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op, EVT VT = Op.getValueType(); SDLoc DL(Op); - unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); + unsigned Depth = Op.getConstantOperandVal(0); SDValue ReturnAddress; if (Depth) { SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); @@ -10942,7 +10941,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op, Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec)); // Update the minimum and maximum lane number seen. 
- unsigned EltNo = cast(V.getOperand(1))->getZExtValue(); + unsigned EltNo = V.getConstantOperandVal(1); Source->MinElt = std::min(Source->MinElt, EltNo); Source->MaxElt = std::max(Source->MaxElt, EltNo); } @@ -13329,7 +13328,7 @@ SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, "Only cases that extract a fixed length vector are supported!"); EVT InVT = Op.getOperand(0).getValueType(); - unsigned Idx = cast(Op.getOperand(1))->getZExtValue(); + unsigned Idx = Op.getConstantOperandVal(1); unsigned Size = Op.getValueSizeInBits(); // If we don't have legal types yet, do nothing @@ -13375,7 +13374,7 @@ SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op, "Only expect to lower inserts into scalable vectors!"); EVT InVT = Op.getOperand(1).getValueType(); - unsigned Idx = cast(Op.getOperand(2))->getZExtValue(); + unsigned Idx = Op.getConstantOperandVal(2); SDValue Vec0 = Op.getOperand(0); SDValue Vec1 = Op.getOperand(1); @@ -18399,8 +18398,8 @@ static bool isSetCC(SDValue Op, SetCCInfoAndKind &SetCCInfo) { // TODO: we want the operands of the Cmp not the csel SetCCInfo.Info.AArch64.Cmp = &Op.getOperand(3); SetCCInfo.IsAArch64 = true; - SetCCInfo.Info.AArch64.CC = static_cast( - cast(Op.getOperand(2))->getZExtValue()); + SetCCInfo.Info.AArch64.CC = + static_cast(Op.getConstantOperandVal(2)); // Check that the operands matches the constraints: // (1) Both operands must be constants. 
@@ -21585,7 +21584,7 @@ static SDValue performNEONPostLDSTCombine(SDNode *N, bool IsDupOp = false; unsigned NewOpc = 0; unsigned NumVecs = 0; - unsigned IntNo = cast(N->getOperand(1))->getZExtValue(); + unsigned IntNo = N->getConstantOperandVal(1); switch (IntNo) { default: llvm_unreachable("unexpected intrinsic for Neon base update"); case Intrinsic::aarch64_neon_ld2: NewOpc = AArch64ISD::LD2post; @@ -22501,7 +22500,7 @@ static SDValue getTestBitOperand(SDValue Op, unsigned &Bit, bool &Invert, static SDValue performTBZCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG) { - unsigned Bit = cast(N->getOperand(2))->getZExtValue(); + unsigned Bit = N->getConstantOperandVal(2); bool Invert = false; SDValue TestSrc = N->getOperand(1); SDValue NewTestSrc = getTestBitOperand(TestSrc, Bit, Invert, DAG); @@ -23789,7 +23788,7 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N, return performMULLCombine(N, DCI, DAG); case ISD::INTRINSIC_VOID: case ISD::INTRINSIC_W_CHAIN: - switch (cast(N->getOperand(1))->getZExtValue()) { + switch (N->getConstantOperandVal(1)) { case Intrinsic::aarch64_sve_prfb_gather_scalar_offset: return combineSVEPrefetchVecBaseImmOff(N, DAG, 1 /*=ScalarSizeInBytes*/); case Intrinsic::aarch64_sve_prfh_gather_scalar_offset: @@ -23940,8 +23939,7 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N, return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_IMM_PRED); case Intrinsic::aarch64_rndr: case Intrinsic::aarch64_rndrrs: { - unsigned IntrinsicID = - cast(N->getOperand(1))->getZExtValue(); + unsigned IntrinsicID = N->getConstantOperandVal(1); auto Register = (IntrinsicID == Intrinsic::aarch64_rndr ? 
AArch64SysReg::RNDR : AArch64SysReg::RNDRRS); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp index b0eac567ec9f1..28604af2cdb3c 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp @@ -377,7 +377,7 @@ const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N, return Subtarget->getRegisterInfo()->getRegClass(RegClass); } case AMDGPU::REG_SEQUENCE: { - unsigned RCID = cast(N->getOperand(0))->getZExtValue(); + unsigned RCID = N->getConstantOperandVal(0); const TargetRegisterClass *SuperRC = Subtarget->getRegisterInfo()->getRegClass(RCID); @@ -2672,7 +2672,7 @@ void AMDGPUDAGToDAGISel::SelectInterpP1F16(SDNode *N) { } void AMDGPUDAGToDAGISel::SelectINTRINSIC_W_CHAIN(SDNode *N) { - unsigned IntrID = cast(N->getOperand(1))->getZExtValue(); + unsigned IntrID = N->getConstantOperandVal(1); switch (IntrID) { case Intrinsic::amdgcn_ds_append: case Intrinsic::amdgcn_ds_consume: { @@ -2690,7 +2690,7 @@ void AMDGPUDAGToDAGISel::SelectINTRINSIC_W_CHAIN(SDNode *N) { } void AMDGPUDAGToDAGISel::SelectINTRINSIC_WO_CHAIN(SDNode *N) { - unsigned IntrID = cast(N->getOperand(0))->getZExtValue(); + unsigned IntrID = N->getConstantOperandVal(0); unsigned Opcode; switch (IntrID) { case Intrinsic::amdgcn_wqm: @@ -2731,7 +2731,7 @@ void AMDGPUDAGToDAGISel::SelectINTRINSIC_WO_CHAIN(SDNode *N) { } void AMDGPUDAGToDAGISel::SelectINTRINSIC_VOID(SDNode *N) { - unsigned IntrID = cast(N->getOperand(1))->getZExtValue(); + unsigned IntrID = N->getConstantOperandVal(1); switch (IntrID) { case Intrinsic::amdgcn_ds_gws_init: case Intrinsic::amdgcn_ds_gws_barrier: diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp index 541a5b62450dd..8fbc90a6db9fd 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp @@ -682,7 +682,7 @@ static bool hasSourceMods(const SDNode 
*N) { case ISD::BITCAST: return false; case ISD::INTRINSIC_WO_CHAIN: { - switch (cast(N->getOperand(0))->getZExtValue()) { + switch (N->getConstantOperandVal(0)) { case Intrinsic::amdgcn_interp_p1: case Intrinsic::amdgcn_interp_p2: case Intrinsic::amdgcn_interp_mov: @@ -837,7 +837,7 @@ bool AMDGPUTargetLowering::isSDNodeAlwaysUniform(const SDNode *N) const { case ISD::TokenFactor: return true; case ISD::INTRINSIC_WO_CHAIN: { - unsigned IntrID = cast(N->getOperand(0))->getZExtValue(); + unsigned IntrID = N->getConstantOperandVal(0); switch (IntrID) { case Intrinsic::amdgcn_readfirstlane: case Intrinsic::amdgcn_readlane: @@ -1489,7 +1489,7 @@ SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const { SDLoc SL(Op); SmallVector Args; - unsigned Start = cast(Op.getOperand(1))->getZExtValue(); + unsigned Start = Op.getConstantOperandVal(1); EVT VT = Op.getValueType(); EVT SrcVT = Op.getOperand(0).getValueType(); @@ -2502,8 +2502,7 @@ static bool valueIsKnownNeverF32Denorm(SDValue Src) { case ISD::FFREXP: return true; case ISD::INTRINSIC_WO_CHAIN: { - unsigned IntrinsicID = - cast(Src.getOperand(0))->getZExtValue(); + unsigned IntrinsicID = Src.getConstantOperandVal(0); switch (IntrinsicID) { case Intrinsic::amdgcn_frexp_mant: return true; @@ -3601,7 +3600,7 @@ static SDValue simplifyMul24(SDNode *Node24, SDValue RHS = IsIntrin ? 
Node24->getOperand(2) : Node24->getOperand(1); unsigned NewOpcode = Node24->getOpcode(); if (IsIntrin) { - unsigned IID = cast(Node24->getOperand(0))->getZExtValue(); + unsigned IID = Node24->getConstantOperandVal(0); switch (IID) { case Intrinsic::amdgcn_mul_i24: NewOpcode = AMDGPUISD::MUL_I24; @@ -3821,7 +3820,7 @@ SDValue AMDGPUTargetLowering::performAssertSZExtCombine(SDNode *N, SDValue AMDGPUTargetLowering::performIntrinsicWOChainCombine( SDNode *N, DAGCombinerInfo &DCI) const { - unsigned IID = cast(N->getOperand(0))->getZExtValue(); + unsigned IID = N->getConstantOperandVal(0); switch (IID) { case Intrinsic::amdgcn_mul_i24: case Intrinsic::amdgcn_mul_u24: @@ -5652,7 +5651,7 @@ void AMDGPUTargetLowering::computeKnownBitsForTargetNode( break; } case ISD::INTRINSIC_WO_CHAIN: { - unsigned IID = cast(Op.getOperand(0))->getZExtValue(); + unsigned IID = Op.getConstantOperandVal(0); switch (IID) { case Intrinsic::amdgcn_workitem_id_x: case Intrinsic::amdgcn_workitem_id_y: @@ -5834,8 +5833,7 @@ bool AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(SDValue Op, return SNaN; } case ISD::INTRINSIC_WO_CHAIN: { - unsigned IntrinsicID - = cast(Op.getOperand(0))->getZExtValue(); + unsigned IntrinsicID = Op.getConstantOperandVal(0); // TODO: Handle more intrinsics switch (IntrinsicID) { case Intrinsic::amdgcn_cubeid: diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp index c1ba9c514874e..9a2fb0bc37b2c 100644 --- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp @@ -424,8 +424,7 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const return lowerADDRSPACECAST(Op, DAG); case ISD::INTRINSIC_VOID: { SDValue Chain = Op.getOperand(0); - unsigned IntrinsicID = - cast(Op.getOperand(1))->getZExtValue(); + unsigned IntrinsicID = Op.getConstantOperandVal(1); switch (IntrinsicID) { case Intrinsic::r600_store_swizzle: { SDLoc DL(Op); @@ -449,8 +448,7 @@ SDValue 
R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const break; } case ISD::INTRINSIC_WO_CHAIN: { - unsigned IntrinsicID = - cast(Op.getOperand(0))->getZExtValue(); + unsigned IntrinsicID = Op.getConstantOperandVal(0); EVT VT = Op.getValueType(); SDLoc DL(Op); switch (IntrinsicID) { diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index f3547db9e9bd9..2cb08e025dd90 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -5388,7 +5388,7 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { return SDValue(); // Get the rounding mode from the last operand - int RoundMode = cast(Op.getOperand(1))->getZExtValue(); + int RoundMode = Op.getConstantOperandVal(1); if (RoundMode == (int)RoundingMode::TowardPositive) Opc = AMDGPUISD::FPTRUNC_ROUND_UPWARD; else if (RoundMode == (int)RoundingMode::TowardNegative) @@ -5698,7 +5698,7 @@ void SITargetLowering::ReplaceNodeResults(SDNode *N, return; } case ISD::INTRINSIC_WO_CHAIN: { - unsigned IID = cast(N->getOperand(0))->getZExtValue(); + unsigned IID = N->getConstantOperandVal(0); switch (IID) { case Intrinsic::amdgcn_make_buffer_rsrc: Results.push_back(lowerPointerAsRsrcIntrin(N, DAG)); @@ -5836,7 +5836,7 @@ static SDNode *findUser(SDValue Value, unsigned Opcode) { unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const { if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) { - switch (cast(Intr->getOperand(1))->getZExtValue()) { + switch (Intr->getConstantOperandVal(1)) { case Intrinsic::amdgcn_if: return AMDGPUISD::IF; case Intrinsic::amdgcn_else: @@ -5985,7 +5985,7 @@ SDValue SITargetLowering::LowerRETURNADDR(SDValue Op, MVT VT = Op.getSimpleValueType(); SDLoc DL(Op); // Checking the depth - if (cast(Op.getOperand(0))->getZExtValue() != 0) + if (Op.getConstantOperandVal(0) != 0) return DAG.getConstant(0, DL, VT); MachineFunction &MF = DAG.getMachineFunction(); @@ -7634,7 
+7634,7 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, EVT VT = Op.getValueType(); SDLoc DL(Op); - unsigned IntrinsicID = cast(Op.getOperand(0))->getZExtValue(); + unsigned IntrinsicID = Op.getConstantOperandVal(0); // TODO: Should this propagate fast-math-flags? @@ -7788,7 +7788,7 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, return DAG.getConstant(MF.getSubtarget().getWavefrontSize(), SDLoc(Op), MVT::i32); case Intrinsic::amdgcn_s_buffer_load: { - unsigned CPol = cast(Op.getOperand(3))->getZExtValue(); + unsigned CPol = Op.getConstantOperandVal(3); if (CPol & ~((Subtarget->getGeneration() >= AMDGPUSubtarget::GFX12) ? AMDGPU::CPol::ALL : AMDGPU::CPol::ALL_pregfx12)) @@ -8038,7 +8038,7 @@ SITargetLowering::lowerStructBufferAtomicIntrin(SDValue Op, SelectionDAG &DAG, SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const { - unsigned IntrID = cast(Op.getOperand(1))->getZExtValue(); + unsigned IntrID = Op.getConstantOperandVal(1); SDLoc DL(Op); switch (IntrID) { @@ -8134,8 +8134,8 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, } case Intrinsic::amdgcn_buffer_load: case Intrinsic::amdgcn_buffer_load_format: { - unsigned Glc = cast(Op.getOperand(5))->getZExtValue(); - unsigned Slc = cast(Op.getOperand(6))->getZExtValue(); + unsigned Glc = Op.getConstantOperandVal(5); + unsigned Slc = Op.getConstantOperandVal(6); unsigned IdxEn = getIdxEn(Op.getOperand(3)); SDValue Ops[] = { Op.getOperand(0), // Chain @@ -8223,10 +8223,10 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, EVT LoadVT = Op.getValueType(); auto SOffset = selectSOffset(Op.getOperand(5), DAG, Subtarget); - unsigned Dfmt = cast(Op.getOperand(7))->getZExtValue(); - unsigned Nfmt = cast(Op.getOperand(8))->getZExtValue(); - unsigned Glc = cast(Op.getOperand(9))->getZExtValue(); - unsigned Slc = cast(Op.getOperand(10))->getZExtValue(); + unsigned Dfmt = Op.getConstantOperandVal(7); + unsigned Nfmt = 
Op.getConstantOperandVal(8); + unsigned Glc = Op.getConstantOperandVal(9); + unsigned Slc = Op.getConstantOperandVal(10); unsigned IdxEn = getIdxEn(Op.getOperand(3)); SDValue Ops[] = { Op.getOperand(0), // Chain @@ -8313,7 +8313,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, case Intrinsic::amdgcn_buffer_atomic_or: case Intrinsic::amdgcn_buffer_atomic_xor: case Intrinsic::amdgcn_buffer_atomic_fadd: { - unsigned Slc = cast(Op.getOperand(6))->getZExtValue(); + unsigned Slc = Op.getConstantOperandVal(6); unsigned IdxEn = getIdxEn(Op.getOperand(4)); SDValue Ops[] = { Op.getOperand(0), // Chain @@ -8474,7 +8474,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_DEC); case Intrinsic::amdgcn_buffer_atomic_cmpswap: { - unsigned Slc = cast(Op.getOperand(7))->getZExtValue(); + unsigned Slc = Op.getConstantOperandVal(7); unsigned IdxEn = getIdxEn(Op.getOperand(5)); SDValue Ops[] = { Op.getOperand(0), // Chain @@ -8878,7 +8878,7 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const { SDLoc DL(Op); SDValue Chain = Op.getOperand(0); - unsigned IntrinsicID = cast(Op.getOperand(1))->getZExtValue(); + unsigned IntrinsicID = Op.getConstantOperandVal(1); MachineFunction &MF = DAG.getMachineFunction(); switch (IntrinsicID) { @@ -8943,10 +8943,10 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); if (IsD16) VData = handleD16VData(VData, DAG); - unsigned Dfmt = cast(Op.getOperand(8))->getZExtValue(); - unsigned Nfmt = cast(Op.getOperand(9))->getZExtValue(); - unsigned Glc = cast(Op.getOperand(10))->getZExtValue(); - unsigned Slc = cast(Op.getOperand(11))->getZExtValue(); + unsigned Dfmt = Op.getConstantOperandVal(8); + unsigned Nfmt = Op.getConstantOperandVal(9); + unsigned Glc = Op.getConstantOperandVal(10); + unsigned Slc = Op.getConstantOperandVal(11); unsigned IdxEn = 
getIdxEn(Op.getOperand(4)); SDValue Ops[] = { Chain, @@ -9029,8 +9029,8 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); if (IsD16) VData = handleD16VData(VData, DAG); - unsigned Glc = cast(Op.getOperand(6))->getZExtValue(); - unsigned Slc = cast(Op.getOperand(7))->getZExtValue(); + unsigned Glc = Op.getConstantOperandVal(6); + unsigned Slc = Op.getConstantOperandVal(7); unsigned IdxEn = getIdxEn(Op.getOperand(4)); SDValue Ops[] = { Chain, @@ -12069,8 +12069,7 @@ bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op, return false; } case ISD::INTRINSIC_WO_CHAIN: { - unsigned IntrinsicID - = cast(Op.getOperand(0))->getZExtValue(); + unsigned IntrinsicID = Op.getConstantOperandVal(0); // TODO: Handle more intrinsics switch (IntrinsicID) { case Intrinsic::amdgcn_cvt_pkrtz: @@ -15008,7 +15007,7 @@ void SITargetLowering::computeKnownBitsForTargetNode(const SDValue Op, unsigned Opc = Op.getOpcode(); switch (Opc) { case ISD::INTRINSIC_WO_CHAIN: { - unsigned IID = cast(Op.getOperand(0))->getZExtValue(); + unsigned IID = Op.getConstantOperandVal(0); switch (IID) { case Intrinsic::amdgcn_mbcnt_lo: case Intrinsic::amdgcn_mbcnt_hi: { @@ -15251,11 +15250,9 @@ bool SITargetLowering::isSDNodeSourceOfDivergence(const SDNode *N, case ISD::CALLSEQ_END: return true; case ISD::INTRINSIC_WO_CHAIN: - return AMDGPU::isIntrinsicSourceOfDivergence( - cast(N->getOperand(0))->getZExtValue()); + return AMDGPU::isIntrinsicSourceOfDivergence(N->getConstantOperandVal(0)); case ISD::INTRINSIC_W_CHAIN: - return AMDGPU::isIntrinsicSourceOfDivergence( - cast(N->getOperand(1))->getZExtValue()); + return AMDGPU::isIntrinsicSourceOfDivergence(N->getConstantOperandVal(1)); case AMDGPUISD::ATOMIC_CMP_SWAP: case AMDGPUISD::ATOMIC_LOAD_FMIN: case AMDGPUISD::ATOMIC_LOAD_FMAX: diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp index 2fb3957a1ca9d..aae6f2e842fd1 100644 --- 
a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -273,8 +273,8 @@ bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1, // subtract the index by one. Offset0Idx -= get(Opc0).NumDefs; Offset1Idx -= get(Opc1).NumDefs; - Offset0 = cast(Load0->getOperand(Offset0Idx))->getZExtValue(); - Offset1 = cast(Load1->getOperand(Offset1Idx))->getZExtValue(); + Offset0 = Load0->getConstantOperandVal(Offset0Idx); + Offset1 = Load1->getConstantOperandVal(Offset1Idx); return true; } diff --git a/llvm/lib/Target/ARC/ARCISelLowering.cpp b/llvm/lib/Target/ARC/ARCISelLowering.cpp index 5d9a366f5ed54..2265f5db6737e 100644 --- a/llvm/lib/Target/ARC/ARCISelLowering.cpp +++ b/llvm/lib/Target/ARC/ARCISelLowering.cpp @@ -751,7 +751,7 @@ SDValue ARCTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); SDLoc dl(Op); - assert(cast(Op.getOperand(0))->getZExtValue() == 0 && + assert(Op.getConstantOperandVal(0) == 0 && "Only support lowering frame addr of current frame."); Register FrameReg = ARI.getFrameRegister(MF); return DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp index a0776296b8ebc..ef02dc9970114 100644 --- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -4499,8 +4499,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, default: break; case ARM::LDRrs: case ARM::LDRBrs: { - unsigned ShOpVal = - cast(DefNode->getOperand(2))->getZExtValue(); + unsigned ShOpVal = DefNode->getConstantOperandVal(2); unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); if (ShImm == 0 || (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)) @@ -4512,8 +4511,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, case ARM::t2LDRHs: case ARM::t2LDRSHs: { // Thumb2 mode: lsl only. 
- unsigned ShAmt = - cast(DefNode->getOperand(2))->getZExtValue(); + unsigned ShAmt = DefNode->getConstantOperandVal(2); if (ShAmt == 0 || ShAmt == 2) Latency = *Latency - 1; break; @@ -4526,8 +4524,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, default: break; case ARM::LDRrs: case ARM::LDRBrs: { - unsigned ShOpVal = - cast(DefNode->getOperand(2))->getZExtValue(); + unsigned ShOpVal = DefNode->getConstantOperandVal(2); unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); if (ShImm == 0 || ((ShImm == 1 || ShImm == 2 || ShImm == 3) && diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp index 984d8d3e0b08c..adc429b61bbcc 100644 --- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp +++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -2422,8 +2422,7 @@ void ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating, MachineMemOperand *MemOp = cast(N)->getMemOperand(); SDValue Chain = N->getOperand(0); - unsigned Lane = - cast(N->getOperand(Vec0Idx + NumVecs))->getZExtValue(); + unsigned Lane = N->getConstantOperandVal(Vec0Idx + NumVecs); EVT VT = N->getOperand(Vec0Idx).getValueType(); bool is64BitVector = VT.is64BitVector(); @@ -2587,7 +2586,7 @@ void ARMDAGToDAGISel::SelectMVE_WB(SDNode *N, const uint16_t *Opcodes, Ops.push_back(N->getOperand(2)); // vector of base addresses - int32_t ImmValue = cast(N->getOperand(3))->getZExtValue(); + int32_t ImmValue = N->getConstantOperandVal(3); Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate offset if (Predicated) @@ -2622,7 +2621,7 @@ void ARMDAGToDAGISel::SelectMVE_LongShift(SDNode *N, uint16_t Opcode, // The shift count if (Immediate) { - int32_t ImmValue = cast(N->getOperand(3))->getZExtValue(); + int32_t ImmValue = N->getConstantOperandVal(3); Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate shift count } else { Ops.push_back(N->getOperand(3)); @@ -2630,7 +2629,7 @@ void ARMDAGToDAGISel::SelectMVE_LongShift(SDNode *N, uint16_t Opcode, // The 
immediate saturation operand, if any if (HasSaturationOperand) { - int32_t SatOp = cast(N->getOperand(4))->getZExtValue(); + int32_t SatOp = N->getConstantOperandVal(4); int SatBit = (SatOp == 64 ? 0 : 1); Ops.push_back(getI32Imm(SatBit, Loc)); } @@ -2685,7 +2684,7 @@ void ARMDAGToDAGISel::SelectMVE_VSHLC(SDNode *N, bool Predicated) { // and then an immediate shift count Ops.push_back(N->getOperand(1)); Ops.push_back(N->getOperand(2)); - int32_t ImmValue = cast(N->getOperand(3))->getZExtValue(); + int32_t ImmValue = N->getConstantOperandVal(3); Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate shift count if (Predicated) @@ -4138,14 +4137,13 @@ void ARMDAGToDAGISel::Select(SDNode *N) { if (InGlue.getOpcode() == ARMISD::CMPZ) { if (InGlue.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN) { SDValue Int = InGlue.getOperand(0); - uint64_t ID = cast(Int->getOperand(1))->getZExtValue(); + uint64_t ID = Int->getConstantOperandVal(1); // Handle low-overhead loops. if (ID == Intrinsic::loop_decrement_reg) { SDValue Elements = Int.getOperand(2); - SDValue Size = CurDAG->getTargetConstant( - cast(Int.getOperand(3))->getZExtValue(), dl, - MVT::i32); + SDValue Size = CurDAG->getTargetConstant(Int.getConstantOperandVal(3), + dl, MVT::i32); SDValue Args[] = { Elements, Size, Int.getOperand(0) }; SDNode *LoopDec = @@ -4715,7 +4713,7 @@ void ARMDAGToDAGISel::Select(SDNode *N) { case ISD::INTRINSIC_VOID: case ISD::INTRINSIC_W_CHAIN: { - unsigned IntNo = cast(N->getOperand(1))->getZExtValue(); + unsigned IntNo = N->getConstantOperandVal(1); switch (IntNo) { default: break; @@ -4732,9 +4730,9 @@ void ARMDAGToDAGISel::Select(SDNode *N) { Opc = (IntNo == Intrinsic::arm_mrrc ? 
ARM::MRRC : ARM::MRRC2); SmallVector Ops; - Ops.push_back(getI32Imm(cast(N->getOperand(2))->getZExtValue(), dl)); /* coproc */ - Ops.push_back(getI32Imm(cast(N->getOperand(3))->getZExtValue(), dl)); /* opc */ - Ops.push_back(getI32Imm(cast(N->getOperand(4))->getZExtValue(), dl)); /* CRm */ + Ops.push_back(getI32Imm(N->getConstantOperandVal(2), dl)); /* coproc */ + Ops.push_back(getI32Imm(N->getConstantOperandVal(3), dl)); /* opc */ + Ops.push_back(getI32Imm(N->getConstantOperandVal(4), dl)); /* CRm */ // The mrrc2 instruction in ARM doesn't allow predicates, the top 4 bits of the encoded // instruction will always be '1111' but it is possible in assembly language to specify @@ -5181,7 +5179,7 @@ void ARMDAGToDAGISel::Select(SDNode *N) { } case ISD::INTRINSIC_WO_CHAIN: { - unsigned IntNo = cast(N->getOperand(0))->getZExtValue(); + unsigned IntNo = N->getConstantOperandVal(0); switch (IntNo) { default: break; diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index d00b7853816e1..cf9646a0b81ed 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -4110,7 +4110,7 @@ SDValue ARMTargetLowering::LowerINTRINSIC_VOID( SDValue ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget) const { - unsigned IntNo = cast(Op.getOperand(0))->getZExtValue(); + unsigned IntNo = Op.getConstantOperandVal(0); SDLoc dl(Op); switch (IntNo) { default: return SDValue(); // Don't custom lower most intrinsics. @@ -4289,13 +4289,13 @@ static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, return Op.getOperand(0); SDLoc dl(Op); - unsigned isRead = ~cast(Op.getOperand(2))->getZExtValue() & 1; + unsigned isRead = ~Op.getConstantOperandVal(2) & 1; if (!isRead && (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) // ARMv7 with MP extension has PLDW. 
return Op.getOperand(0); - unsigned isData = cast(Op.getOperand(4))->getZExtValue(); + unsigned isData = Op.getConstantOperandVal(4); if (Subtarget->isThumb()) { // Invert the bits. isRead = ~isRead & 1; @@ -4800,7 +4800,7 @@ SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, LHS->hasOneUse() && isa(LHS.getOperand(1)) && LHS.getValueType() == MVT::i32 && isa(RHS) && !isSignedIntSetCC(CC)) { - unsigned Mask = cast(LHS.getOperand(1))->getZExtValue(); + unsigned Mask = LHS.getConstantOperandVal(1); auto *RHSC = cast(RHS.getNode()); uint64_t RHSV = RHSC->getZExtValue(); if (isMask_32(Mask) && (RHSV & ~Mask) == 0 && Mask != 255 && Mask != 65535) { @@ -4823,9 +4823,8 @@ SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, isa(RHS) && cast(RHS)->getZExtValue() == 0x80000000U && CC == ISD::SETUGT && isa(LHS.getOperand(1)) && - cast(LHS.getOperand(1))->getZExtValue() < 31) { - unsigned ShiftAmt = - cast(LHS.getOperand(1))->getZExtValue() + 1; + LHS.getConstantOperandVal(1) < 31) { + unsigned ShiftAmt = LHS.getConstantOperandVal(1) + 1; SDValue Shift = DAG.getNode(ARMISD::LSLS, dl, DAG.getVTList(MVT::i32, MVT::i32), LHS.getOperand(0), @@ -6112,7 +6111,7 @@ SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ EVT VT = Op.getValueType(); SDLoc dl(Op); - unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); + unsigned Depth = Op.getConstantOperandVal(0); if (Depth) { SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); SDValue Offset = DAG.getConstant(4, dl, MVT::i32); @@ -6135,7 +6134,7 @@ SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); SDLoc dl(Op); // FIXME probably not meaningful - unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); + unsigned Depth = Op.getConstantOperandVal(0); Register FrameReg = ARI.getFrameRegister(MF); SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); while (Depth--) @@ -8221,7 
+8220,7 @@ SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec)); // Update the minimum and maximum lane number seen. - unsigned EltNo = cast(V.getOperand(1))->getZExtValue(); + unsigned EltNo = V.getConstantOperandVal(1); Source->MinElt = std::min(Source->MinElt, EltNo); Source->MaxElt = std::max(Source->MaxElt, EltNo); } @@ -9034,7 +9033,7 @@ static SDValue LowerINSERT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG, SDValue Conv = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0)); - unsigned Lane = cast(Op.getOperand(2))->getZExtValue(); + unsigned Lane = Op.getConstantOperandVal(2); unsigned LaneWidth = getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8; unsigned Mask = ((1 << LaneWidth) - 1) << Lane * LaneWidth; @@ -9097,7 +9096,7 @@ static SDValue LowerEXTRACT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG, SDValue Conv = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0)); - unsigned Lane = cast(Op.getOperand(1))->getZExtValue(); + unsigned Lane = Op.getConstantOperandVal(1); unsigned LaneWidth = getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8; SDValue Shift = DAG.getNode(ISD::SRL, dl, MVT::i32, Conv, @@ -10682,7 +10681,7 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl &Results, SelectionDAG &DAG) { - unsigned IntNo = cast(N->getOperand(0))->getZExtValue(); + unsigned IntNo = N->getConstantOperandVal(0); unsigned Opc = 0; if (IntNo == Intrinsic::arm_smlald) Opc = ARMISD::SMLALD; @@ -14908,7 +14907,7 @@ static SDValue PerformBFICombine(SDNode *N, SelectionDAG &DAG) { ConstantSDNode *N11C = dyn_cast(N1.getOperand(1)); if (!N11C) return SDValue(); - unsigned InvMask = cast(N->getOperand(2))->getZExtValue(); + unsigned InvMask = N->getConstantOperandVal(2); unsigned LSB = llvm::countr_zero(~InvMask); unsigned Width = 
llvm::bit_width(~InvMask) - LSB; assert(Width < @@ -15448,8 +15447,7 @@ static SDValue PerformVCMPCombine(SDNode *N, SelectionDAG &DAG, EVT VT = N->getValueType(0); SDValue Op0 = N->getOperand(0); SDValue Op1 = N->getOperand(1); - ARMCC::CondCodes Cond = - (ARMCC::CondCodes)cast(N->getOperand(2))->getZExtValue(); + ARMCC::CondCodes Cond = (ARMCC::CondCodes)N->getConstantOperandVal(2); SDLoc dl(N); // vcmp X, 0, cc -> vcmpz X, cc @@ -15794,7 +15792,7 @@ static bool TryCombineBaseUpdate(struct BaseUpdateTarget &Target, unsigned NewOpc = 0; unsigned NumVecs = 0; if (Target.isIntrinsic) { - unsigned IntNo = cast(N->getOperand(1))->getZExtValue(); + unsigned IntNo = N->getConstantOperandVal(1); switch (IntNo) { default: llvm_unreachable("unexpected intrinsic for Neon base update"); @@ -16254,12 +16252,10 @@ static SDValue PerformMVEVLDCombine(SDNode *N, // For the stores, where there are multiple intrinsics we only actually want // to post-inc the last of the them. - unsigned IntNo = cast(N->getOperand(1))->getZExtValue(); - if (IntNo == Intrinsic::arm_mve_vst2q && - cast(N->getOperand(5))->getZExtValue() != 1) + unsigned IntNo = N->getConstantOperandVal(1); + if (IntNo == Intrinsic::arm_mve_vst2q && N->getConstantOperandVal(5) != 1) return SDValue(); - if (IntNo == Intrinsic::arm_mve_vst4q && - cast(N->getOperand(7))->getZExtValue() != 3) + if (IntNo == Intrinsic::arm_mve_vst4q && N->getConstantOperandVal(7) != 3) return SDValue(); // Search for a use of the address operand that is an increment. 
@@ -16381,7 +16377,7 @@ static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { return false; unsigned NumVecs = 0; unsigned NewOpc = 0; - unsigned IntNo = cast(VLD->getOperand(1))->getZExtValue(); + unsigned IntNo = VLD->getConstantOperandVal(1); if (IntNo == Intrinsic::arm_neon_vld2lane) { NumVecs = 2; NewOpc = ARMISD::VLD2DUP; @@ -16397,8 +16393,7 @@ static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { // First check that all the vldN-lane uses are VDUPLANEs and that the lane // numbers match the load. - unsigned VLDLaneNo = - cast(VLD->getOperand(NumVecs+3))->getZExtValue(); + unsigned VLDLaneNo = VLD->getConstantOperandVal(NumVecs + 3); for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); UI != UE; ++UI) { // Ignore uses of the chain result. @@ -16406,7 +16401,7 @@ static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { continue; SDNode *User = *UI; if (User->getOpcode() != ARMISD::VDUPLANE || - VLDLaneNo != cast(User->getOperand(1))->getZExtValue()) + VLDLaneNo != User->getConstantOperandVal(1)) return false; } @@ -16479,7 +16474,7 @@ static SDValue PerformVDUPLANECombine(SDNode *N, // Make sure the VMOV element size is not bigger than the VDUPLANE elements. unsigned EltSize = Op.getScalarValueSizeInBits(); // The canonical VMOV for a zero vector uses a 32-bit element size. - unsigned Imm = cast(Op.getOperand(0))->getZExtValue(); + unsigned Imm = Op.getConstantOperandVal(0); unsigned EltBits; if (ARM_AM::decodeVMOVModImm(Imm, EltBits) == 0) EltSize = 8; @@ -17479,7 +17474,7 @@ static SDValue PerformLongShiftCombine(SDNode *N, SelectionDAG &DAG) { SDValue ARMTargetLowering::PerformIntrinsicCombine(SDNode *N, DAGCombinerInfo &DCI) const { SelectionDAG &DAG = DCI.DAG; - unsigned IntNo = cast(N->getOperand(0))->getZExtValue(); + unsigned IntNo = N->getConstantOperandVal(0); switch (IntNo) { default: // Don't do anything for most intrinsics. 
@@ -17669,7 +17664,7 @@ SDValue ARMTargetLowering::PerformIntrinsicCombine(SDNode *N, case Intrinsic::arm_mve_addv: { // Turn this intrinsic straight into the appropriate ARMISD::VADDV node, // which allow PerformADDVecReduce to turn it into VADDLV when possible. - bool Unsigned = cast(N->getOperand(2))->getZExtValue(); + bool Unsigned = N->getConstantOperandVal(2); unsigned Opc = Unsigned ? ARMISD::VADDVu : ARMISD::VADDVs; return DAG.getNode(Opc, SDLoc(N), N->getVTList(), N->getOperand(1)); } @@ -17678,7 +17673,7 @@ SDValue ARMTargetLowering::PerformIntrinsicCombine(SDNode *N, case Intrinsic::arm_mve_addlv_predicated: { // Same for these, but ARMISD::VADDLV has to be followed by a BUILD_PAIR // which recombines the two outputs into an i64 - bool Unsigned = cast(N->getOperand(2))->getZExtValue(); + bool Unsigned = N->getConstantOperandVal(2); unsigned Opc = IntNo == Intrinsic::arm_mve_addlv ? (Unsigned ? ARMISD::VADDLVu : ARMISD::VADDLVs) : (Unsigned ? ARMISD::VADDLVpu : ARMISD::VADDLVps); @@ -18193,7 +18188,7 @@ static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm, return SearchLoopIntrinsic(N->getOperand(0), CC, Imm, Negate); } case ISD::INTRINSIC_W_CHAIN: { - unsigned IntOp = cast(N.getOperand(1))->getZExtValue(); + unsigned IntOp = N.getConstantOperandVal(1); if (IntOp != Intrinsic::test_start_loop_iterations && IntOp != Intrinsic::loop_decrement_reg) return SDValue(); @@ -18271,7 +18266,7 @@ static SDValue PerformHWLoopCombine(SDNode *N, SDLoc dl(Int); SelectionDAG &DAG = DCI.DAG; SDValue Elements = Int.getOperand(2); - unsigned IntOp = cast(Int->getOperand(1))->getZExtValue(); + unsigned IntOp = Int->getConstantOperandVal(1); assert((N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BR) && "expected single br user"); SDNode *Br = *N->use_begin(); @@ -18305,8 +18300,8 @@ static SDValue PerformHWLoopCombine(SDNode *N, DAG.ReplaceAllUsesOfValueWith(Int.getValue(2), Int.getOperand(0)); return Res; } else { - SDValue Size = 
DAG.getTargetConstant( - cast(Int.getOperand(3))->getZExtValue(), dl, MVT::i32); + SDValue Size = + DAG.getTargetConstant(Int.getConstantOperandVal(3), dl, MVT::i32); SDValue Args[] = { Int.getOperand(0), Elements, Size, }; SDValue LoopDec = DAG.getNode(ARMISD::LOOP_DEC, dl, DAG.getVTList(MVT::i32, MVT::Other), Args); @@ -19051,7 +19046,7 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, } case ISD::INTRINSIC_VOID: case ISD::INTRINSIC_W_CHAIN: - switch (cast(N->getOperand(1))->getZExtValue()) { + switch (N->getConstantOperandVal(1)) { case Intrinsic::arm_neon_vld1: case Intrinsic::arm_neon_vld1x2: case Intrinsic::arm_neon_vld1x3: diff --git a/llvm/lib/Target/AVR/AVRISelDAGToDAG.cpp b/llvm/lib/Target/AVR/AVRISelDAGToDAG.cpp index 196122e45ab8d..e67a1e2ed5090 100644 --- a/llvm/lib/Target/AVR/AVRISelDAGToDAG.cpp +++ b/llvm/lib/Target/AVR/AVRISelDAGToDAG.cpp @@ -335,7 +335,7 @@ template <> bool AVRDAGToDAGISel::select(SDNode *N) { return false; } - int CST = (int)cast(BasePtr.getOperand(1))->getZExtValue(); + int CST = (int)BasePtr.getConstantOperandVal(1); SDValue Chain = ST->getChain(); EVT VT = ST->getValue().getValueType(); SDLoc DL(N); diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp index cd1dcfaea0eb1..d36bfb188ed36 100644 --- a/llvm/lib/Target/AVR/AVRISelLowering.cpp +++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp @@ -298,8 +298,7 @@ SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const { SDValue SrcHi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i16, Op.getOperand(0), DAG.getConstant(1, dl, MVT::i16)); - uint64_t ShiftAmount = - cast(N->getOperand(1))->getZExtValue(); + uint64_t ShiftAmount = N->getConstantOperandVal(1); if (ShiftAmount == 16) { // Special case these two operations because they appear to be used by the // generic codegen parts to lower 32-bit numbers. 
@@ -367,7 +366,7 @@ SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const { } } - uint64_t ShiftAmount = cast(N->getOperand(1))->getZExtValue(); + uint64_t ShiftAmount = N->getConstantOperandVal(1); SDValue Victim = N->getOperand(0); switch (Op.getOpcode()) { diff --git a/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp b/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp index 909c7c005735b..d8139958e9fcf 100644 --- a/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp +++ b/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp @@ -193,7 +193,7 @@ void BPFDAGToDAGISel::Select(SDNode *Node) { default: break; case ISD::INTRINSIC_W_CHAIN: { - unsigned IntNo = cast(Node->getOperand(1))->getZExtValue(); + unsigned IntNo = Node->getConstantOperandVal(1); switch (IntNo) { case Intrinsic::bpf_load_byte: case Intrinsic::bpf_load_half: @@ -469,7 +469,7 @@ void BPFDAGToDAGISel::PreprocessTrunc(SDNode *Node, if (BaseV.getOpcode() != ISD::INTRINSIC_W_CHAIN) return; - unsigned IntNo = cast(BaseV->getOperand(1))->getZExtValue(); + unsigned IntNo = BaseV->getConstantOperandVal(1); uint64_t MaskV = MaskN->getZExtValue(); if (!((IntNo == Intrinsic::bpf_load_byte && MaskV == 0xFF) || diff --git a/llvm/lib/Target/CSKY/CSKYISelLowering.cpp b/llvm/lib/Target/CSKY/CSKYISelLowering.cpp index e3b4a2dc048ab..90f70b83a02d3 100644 --- a/llvm/lib/Target/CSKY/CSKYISelLowering.cpp +++ b/llvm/lib/Target/CSKY/CSKYISelLowering.cpp @@ -1219,7 +1219,7 @@ SDValue CSKYTargetLowering::LowerFRAMEADDR(SDValue Op, EVT VT = Op.getValueType(); SDLoc dl(Op); - unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); + unsigned Depth = Op.getConstantOperandVal(0); Register FrameReg = RI.getFrameRegister(MF); SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); while (Depth--) @@ -1240,7 +1240,7 @@ SDValue CSKYTargetLowering::LowerRETURNADDR(SDValue Op, EVT VT = Op.getValueType(); SDLoc dl(Op); - unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); + unsigned Depth = Op.getConstantOperandVal(0); if 
(Depth) { SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); SDValue Offset = DAG.getConstant(4, dl, MVT::i32); diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp index f930015026a5c..eb5c59672224e 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp @@ -192,7 +192,7 @@ MachineSDNode *HexagonDAGToDAGISel::LoadInstrForLoadIntrinsic(SDNode *IntN) { return nullptr; SDLoc dl(IntN); - unsigned IntNo = cast(IntN->getOperand(1))->getZExtValue(); + unsigned IntNo = IntN->getConstantOperandVal(1); static std::map LoadPciMap = { { Intrinsic::hexagon_circ_ldb, Hexagon::L2_loadrb_pci }, @@ -284,18 +284,18 @@ bool HexagonDAGToDAGISel::tryLoadOfLoadIntrinsic(LoadSDNode *N) { // can provide an address of an unsigned variable to store the result of // a sign-extending intrinsic into (or the other way around). ISD::LoadExtType IntExt; - switch (cast(C->getOperand(1))->getZExtValue()) { - case Intrinsic::hexagon_circ_ldub: - case Intrinsic::hexagon_circ_lduh: - IntExt = ISD::ZEXTLOAD; - break; - case Intrinsic::hexagon_circ_ldw: - case Intrinsic::hexagon_circ_ldd: - IntExt = ISD::NON_EXTLOAD; - break; - default: - IntExt = ISD::SEXTLOAD; - break; + switch (C->getConstantOperandVal(1)) { + case Intrinsic::hexagon_circ_ldub: + case Intrinsic::hexagon_circ_lduh: + IntExt = ISD::ZEXTLOAD; + break; + case Intrinsic::hexagon_circ_ldw: + case Intrinsic::hexagon_circ_ldd: + IntExt = ISD::NON_EXTLOAD; + break; + default: + IntExt = ISD::SEXTLOAD; + break; } if (N->getExtensionType() != IntExt) return false; @@ -325,7 +325,7 @@ bool HexagonDAGToDAGISel::SelectBrevLdIntrinsic(SDNode *IntN) { return false; const SDLoc &dl(IntN); - unsigned IntNo = cast(IntN->getOperand(1))->getZExtValue(); + unsigned IntNo = IntN->getConstantOperandVal(1); static const std::map LoadBrevMap = { { Intrinsic::hexagon_L2_loadrb_pbr, Hexagon::L2_loadrb_pbr }, @@ -366,7 +366,7 @@ bool 
HexagonDAGToDAGISel::SelectNewCircIntrinsic(SDNode *IntN) { return false; SDLoc DL(IntN); - unsigned IntNo = cast(IntN->getOperand(1))->getZExtValue(); + unsigned IntNo = IntN->getConstantOperandVal(1); SmallVector Ops; static std::map LoadNPcMap = { @@ -641,7 +641,7 @@ void HexagonDAGToDAGISel::SelectIntrinsicWChain(SDNode *N) { if (SelectNewCircIntrinsic(N)) return; - unsigned IntNo = cast(N->getOperand(1))->getZExtValue(); + unsigned IntNo = N->getConstantOperandVal(1); if (IntNo == Intrinsic::hexagon_V6_vgathermw || IntNo == Intrinsic::hexagon_V6_vgathermw_128B || IntNo == Intrinsic::hexagon_V6_vgathermh || @@ -665,7 +665,7 @@ void HexagonDAGToDAGISel::SelectIntrinsicWChain(SDNode *N) { } void HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) { - unsigned IID = cast(N->getOperand(0))->getZExtValue(); + unsigned IID = N->getConstantOperandVal(0); unsigned Bits; switch (IID) { case Intrinsic::hexagon_S2_vsplatrb: diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp index e08566718d7cd..fb156f2583e8a 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp @@ -2895,7 +2895,7 @@ void HexagonDAGToDAGISel::SelectV65GatherPred(SDNode *N) { SDValue ImmOperand = CurDAG->getTargetConstant(0, dl, MVT::i32); unsigned Opcode; - unsigned IntNo = cast(N->getOperand(1))->getZExtValue(); + unsigned IntNo = N->getConstantOperandVal(1); switch (IntNo) { default: llvm_unreachable("Unexpected HVX gather intrinsic."); @@ -2934,7 +2934,7 @@ void HexagonDAGToDAGISel::SelectV65Gather(SDNode *N) { SDValue ImmOperand = CurDAG->getTargetConstant(0, dl, MVT::i32); unsigned Opcode; - unsigned IntNo = cast(N->getOperand(1))->getZExtValue(); + unsigned IntNo = N->getConstantOperandVal(1); switch (IntNo) { default: llvm_unreachable("Unexpected HVX gather intrinsic."); @@ -2963,7 +2963,7 @@ void HexagonDAGToDAGISel::SelectV65Gather(SDNode *N) { } void 
HexagonDAGToDAGISel::SelectHVXDualOutput(SDNode *N) { - unsigned IID = cast(N->getOperand(0))->getZExtValue(); + unsigned IID = N->getConstantOperandVal(0); SDNode *Result; switch (IID) { case Intrinsic::hexagon_V6_vaddcarry: { diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp index a7d452e7227d7..51138091f4a55 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -669,8 +669,7 @@ HexagonTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const { --NumOps; // Ignore the flag operand. for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) { - const InlineAsm::Flag Flags( - cast(Op.getOperand(i))->getZExtValue()); + const InlineAsm::Flag Flags(Op.getConstantOperandVal(i)); unsigned NumVals = Flags.getNumOperandRegisters(); ++i; // Skip the ID value. @@ -729,7 +728,7 @@ SDValue HexagonTargetLowering::LowerREADCYCLECOUNTER(SDValue Op, SDValue HexagonTargetLowering::LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const { SDValue Chain = Op.getOperand(0); - unsigned IntNo = cast(Op.getOperand(1))->getZExtValue(); + unsigned IntNo = Op.getConstantOperandVal(1); // Lower the hexagon_prefetch builtin to DCFETCH, as above. 
if (IntNo == Intrinsic::hexagon_prefetch) { SDValue Addr = Op.getOperand(2); @@ -1176,7 +1175,7 @@ HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); SDLoc dl(Op); - unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); + unsigned Depth = Op.getConstantOperandVal(0); if (Depth) { SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); SDValue Offset = DAG.getConstant(4, dl, MVT::i32); @@ -1198,7 +1197,7 @@ HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); SDLoc dl(Op); - unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); + unsigned Depth = Op.getConstantOperandVal(0); SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, HRI.getFrameRegister(), VT); while (Depth--) diff --git a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp index db416a500f597..665e2d79c83d1 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp @@ -2127,7 +2127,7 @@ HexagonTargetLowering::LowerHvxFunnelShift(SDValue Op, SDValue HexagonTargetLowering::LowerHvxIntrinsic(SDValue Op, SelectionDAG &DAG) const { const SDLoc &dl(Op); - unsigned IntNo = cast(Op.getOperand(0))->getZExtValue(); + unsigned IntNo = Op.getConstantOperandVal(0); SmallVector Ops(Op->ops().begin(), Op->ops().end()); auto Swap = [&](SDValue P) { @@ -2922,7 +2922,7 @@ SDValue HexagonTargetLowering::RemoveTLWrapper(SDValue Op, SelectionDAG &DAG) const { assert(Op.getOpcode() == HexagonISD::TL_EXTEND || Op.getOpcode() == HexagonISD::TL_TRUNCATE); - unsigned Opc = cast(Op.getOperand(2))->getZExtValue(); + unsigned Opc = Op.getConstantOperandVal(2); return DAG.getNode(Opc, SDLoc(Op), ty(Op), Op.getOperand(0)); } diff --git a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp index cbb5c2b998e27..17d7ffb586f4e 100644 --- 
a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp +++ b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp @@ -1057,7 +1057,7 @@ SDValue LanaiTargetLowering::LowerRETURNADDR(SDValue Op, EVT VT = Op.getValueType(); SDLoc DL(Op); - unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); + unsigned Depth = Op.getConstantOperandVal(0); if (Depth) { SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); const unsigned Offset = -4; @@ -1080,7 +1080,7 @@ SDValue LanaiTargetLowering::LowerFRAMEADDR(SDValue Op, EVT VT = Op.getValueType(); SDLoc DL(Op); SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, Lanai::FP, VT); - unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); + unsigned Depth = Op.getConstantOperandVal(0); while (Depth--) { const unsigned Offset = -8; SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr, diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp index 80853ee319877..e14bbadf9ed22 100644 --- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp +++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp @@ -588,7 +588,7 @@ SDValue LoongArchTargetLowering::lowerFRAMEADDR(SDValue Op, EVT VT = Op.getValueType(); SDLoc DL(Op); SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT); - unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); + unsigned Depth = Op.getConstantOperandVal(0); int GRLenInBytes = Subtarget.getGRLen() / 8; while (Depth--) { @@ -607,7 +607,7 @@ SDValue LoongArchTargetLowering::lowerRETURNADDR(SDValue Op, return SDValue(); // Currently only support lowering return address for current frame. 
- if (cast(Op.getOperand(0))->getZExtValue() != 0) { + if (Op.getConstantOperandVal(0) != 0) { DAG.getContext()->emitError( "return address can only be determined for the current frame"); return SDValue(); @@ -1263,7 +1263,7 @@ LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op, return emitIntrinsicWithChainErrorMessage(Op, ErrorMsgReqLA64, DAG); case Intrinsic::loongarch_csrrd_w: case Intrinsic::loongarch_csrrd_d: { - unsigned Imm = cast(Op.getOperand(2))->getZExtValue(); + unsigned Imm = Op.getConstantOperandVal(2); return !isUInt<14>(Imm) ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG) : DAG.getNode(LoongArchISD::CSRRD, DL, {GRLenVT, MVT::Other}, @@ -1271,7 +1271,7 @@ LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op, } case Intrinsic::loongarch_csrwr_w: case Intrinsic::loongarch_csrwr_d: { - unsigned Imm = cast(Op.getOperand(3))->getZExtValue(); + unsigned Imm = Op.getConstantOperandVal(3); return !isUInt<14>(Imm) ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG) : DAG.getNode(LoongArchISD::CSRWR, DL, {GRLenVT, MVT::Other}, @@ -1280,7 +1280,7 @@ LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op, } case Intrinsic::loongarch_csrxchg_w: case Intrinsic::loongarch_csrxchg_d: { - unsigned Imm = cast(Op.getOperand(4))->getZExtValue(); + unsigned Imm = Op.getConstantOperandVal(4); return !isUInt<14>(Imm) ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG) : DAG.getNode(LoongArchISD::CSRXCHG, DL, {GRLenVT, MVT::Other}, @@ -1306,7 +1306,7 @@ LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op, {Chain, Op.getOperand(2)}); } case Intrinsic::loongarch_lddir_d: { - unsigned Imm = cast(Op.getOperand(3))->getZExtValue(); + unsigned Imm = Op.getConstantOperandVal(3); return !isUInt<8>(Imm) ? 
emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG) : Op; @@ -1314,7 +1314,7 @@ LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op, case Intrinsic::loongarch_movfcsr2gr: { if (!Subtarget.hasBasicF()) return emitIntrinsicWithChainErrorMessage(Op, ErrorMsgReqF, DAG); - unsigned Imm = cast(Op.getOperand(2))->getZExtValue(); + unsigned Imm = Op.getConstantOperandVal(2); return !isUInt<2>(Imm) ? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG) : DAG.getNode(LoongArchISD::MOVFCSR2GR, DL, {VT, MVT::Other}, @@ -1460,7 +1460,7 @@ SDValue LoongArchTargetLowering::lowerINTRINSIC_VOID(SDValue Op, ASRT_LE_GT_CASE(asrtgt_d) #undef ASRT_LE_GT_CASE case Intrinsic::loongarch_ldpte_d: { - unsigned Imm = cast(Op.getOperand(3))->getZExtValue(); + unsigned Imm = Op.getConstantOperandVal(3); return !Subtarget.is64Bit() ? emitIntrinsicErrorMessage(Op, ErrorMsgReqLA64, DAG) : !isUInt<8>(Imm) ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG) @@ -1473,53 +1473,53 @@ SDValue LoongArchTargetLowering::lowerINTRINSIC_VOID(SDValue Op, : SDValue(); case Intrinsic::loongarch_lasx_xvstelm_b: return (!isInt<8>(cast(Op.getOperand(4))->getSExtValue()) || - !isUInt<5>(cast(Op.getOperand(5))->getZExtValue())) + !isUInt<5>(Op.getConstantOperandVal(5))) ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG) : SDValue(); case Intrinsic::loongarch_lsx_vstelm_b: return (!isInt<8>(cast(Op.getOperand(4))->getSExtValue()) || - !isUInt<4>(cast(Op.getOperand(5))->getZExtValue())) + !isUInt<4>(Op.getConstantOperandVal(5))) ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG) : SDValue(); case Intrinsic::loongarch_lasx_xvstelm_h: return (!isShiftedInt<8, 1>( cast(Op.getOperand(4))->getSExtValue()) || - !isUInt<4>(cast(Op.getOperand(5))->getZExtValue())) + !isUInt<4>(Op.getConstantOperandVal(5))) ? 
emitIntrinsicErrorMessage( Op, "argument out of range or not a multiple of 2", DAG) : SDValue(); case Intrinsic::loongarch_lsx_vstelm_h: return (!isShiftedInt<8, 1>( cast(Op.getOperand(4))->getSExtValue()) || - !isUInt<3>(cast(Op.getOperand(5))->getZExtValue())) + !isUInt<3>(Op.getConstantOperandVal(5))) ? emitIntrinsicErrorMessage( Op, "argument out of range or not a multiple of 2", DAG) : SDValue(); case Intrinsic::loongarch_lasx_xvstelm_w: return (!isShiftedInt<8, 2>( cast(Op.getOperand(4))->getSExtValue()) || - !isUInt<3>(cast(Op.getOperand(5))->getZExtValue())) + !isUInt<3>(Op.getConstantOperandVal(5))) ? emitIntrinsicErrorMessage( Op, "argument out of range or not a multiple of 4", DAG) : SDValue(); case Intrinsic::loongarch_lsx_vstelm_w: return (!isShiftedInt<8, 2>( cast(Op.getOperand(4))->getSExtValue()) || - !isUInt<2>(cast(Op.getOperand(5))->getZExtValue())) + !isUInt<2>(Op.getConstantOperandVal(5))) ? emitIntrinsicErrorMessage( Op, "argument out of range or not a multiple of 4", DAG) : SDValue(); case Intrinsic::loongarch_lasx_xvstelm_d: return (!isShiftedInt<8, 3>( cast(Op.getOperand(4))->getSExtValue()) || - !isUInt<2>(cast(Op.getOperand(5))->getZExtValue())) + !isUInt<2>(Op.getConstantOperandVal(5))) ? emitIntrinsicErrorMessage( Op, "argument out of range or not a multiple of 8", DAG) : SDValue(); case Intrinsic::loongarch_lsx_vstelm_d: return (!isShiftedInt<8, 3>( cast(Op.getOperand(4))->getSExtValue()) || - !isUInt<1>(cast(Op.getOperand(5))->getZExtValue())) + !isUInt<1>(Op.getConstantOperandVal(5))) ? 
emitIntrinsicErrorMessage( Op, "argument out of range or not a multiple of 8", DAG) : SDValue(); @@ -1692,7 +1692,7 @@ replaceVPICKVE2GRResults(SDNode *Node, SmallVectorImpl &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, unsigned ResOp) { const StringRef ErrorMsgOOR = "argument out of range"; - unsigned Imm = cast(Node->getOperand(2))->getZExtValue(); + unsigned Imm = Node->getConstantOperandVal(2); if (!isUInt(Imm)) { emitErrorAndReplaceIntrinsicResults(Node, Results, DAG, ErrorMsgOOR, /*WithChain=*/false); @@ -1995,7 +1995,7 @@ void LoongArchTargetLowering::ReplaceNodeResults( break; } case Intrinsic::loongarch_csrwr_w: { - unsigned Imm = cast(N->getOperand(3))->getZExtValue(); + unsigned Imm = N->getConstantOperandVal(3); if (!isUInt<14>(Imm)) { emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgOOR); return; @@ -2010,7 +2010,7 @@ void LoongArchTargetLowering::ReplaceNodeResults( break; } case Intrinsic::loongarch_csrxchg_w: { - unsigned Imm = cast(N->getOperand(4))->getZExtValue(); + unsigned Imm = N->getConstantOperandVal(4); if (!isUInt<14>(Imm)) { emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgOOR); return; diff --git a/llvm/lib/Target/M68k/M68kISelLowering.cpp b/llvm/lib/Target/M68k/M68kISelLowering.cpp index f42882dafa095..c4d7a0dec7f39 100644 --- a/llvm/lib/Target/M68k/M68kISelLowering.cpp +++ b/llvm/lib/Target/M68k/M68kISelLowering.cpp @@ -2278,8 +2278,7 @@ SDValue M68kTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { isNullConstant(Cond.getOperand(1).getOperand(0))) { SDValue Cmp = Cond.getOperand(1); - unsigned CondCode = - cast(Cond.getOperand(0))->getZExtValue(); + unsigned CondCode = Cond.getConstantOperandVal(0); if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) && (CondCode == M68k::COND_EQ || CondCode == M68k::COND_NE)) { @@ -3388,7 +3387,7 @@ SDValue M68kTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SDNode *Node = Op.getNode(); SDValue Chain = Op.getOperand(0); SDValue Size = 
Op.getOperand(1); - unsigned Align = cast(Op.getOperand(2))->getZExtValue(); + unsigned Align = Op.getConstantOperandVal(2); EVT VT = Node->getValueType(0); // Chain the dynamic stack allocation so that it doesn't modify the stack diff --git a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp index ee7762c296bf5..d3b59138a5a95 100644 --- a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp +++ b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp @@ -964,7 +964,7 @@ SDValue MSP430TargetLowering::LowerShifts(SDValue Op, if (!isa(N->getOperand(1))) return Op; - uint64_t ShiftAmount = cast(N->getOperand(1))->getZExtValue(); + uint64_t ShiftAmount = N->getConstantOperandVal(1); // Expand the stuff into sequence of shifts. SDValue Victim = N->getOperand(0); @@ -1269,7 +1269,7 @@ SDValue MSP430TargetLowering::LowerRETURNADDR(SDValue Op, if (verifyReturnAddressArgumentIsConstant(Op, DAG)) return SDValue(); - unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); + unsigned Depth = Op.getConstantOperandVal(0); SDLoc dl(Op); EVT PtrVT = Op.getValueType(); @@ -1295,7 +1295,7 @@ SDValue MSP430TargetLowering::LowerFRAMEADDR(SDValue Op, EVT VT = Op.getValueType(); SDLoc dl(Op); // FIXME probably not meaningful - unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); + unsigned Depth = Op.getConstantOperandVal(0); SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, MSP430::R4, VT); while (Depth--) diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp index a0cab80243868..483eba4e4f479 100644 --- a/llvm/lib/Target/Mips/MipsISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp @@ -2508,7 +2508,7 @@ SDValue MipsTargetLowering::lowerFABS(SDValue Op, SelectionDAG &DAG) const { SDValue MipsTargetLowering:: lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { // check the depth - if (cast(Op.getOperand(0))->getZExtValue() != 0) { + if (Op.getConstantOperandVal(0) != 0) { 
DAG.getContext()->emitError( "return address can be determined only for current frame"); return SDValue(); @@ -2529,7 +2529,7 @@ SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op, return SDValue(); // check the depth - if (cast(Op.getOperand(0))->getZExtValue() != 0) { + if (Op.getConstantOperandVal(0) != 0) { DAG.getContext()->emitError( "return address can be determined only for current frame"); return SDValue(); diff --git a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp index 8c865afd42079..0ed87ee0809a3 100644 --- a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp +++ b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp @@ -831,8 +831,7 @@ bool MipsSEDAGToDAGISel::trySelect(SDNode *Node) { } case ISD::INTRINSIC_W_CHAIN: { - const unsigned IntrinsicOpcode = - cast(Node->getOperand(1))->getZExtValue(); + const unsigned IntrinsicOpcode = Node->getConstantOperandVal(1); switch (IntrinsicOpcode) { default: break; @@ -885,7 +884,7 @@ bool MipsSEDAGToDAGISel::trySelect(SDNode *Node) { } case ISD::INTRINSIC_WO_CHAIN: { - switch (cast(Node->getOperand(0))->getZExtValue()) { + switch (Node->getConstantOperandVal(0)) { default: break; @@ -901,8 +900,7 @@ bool MipsSEDAGToDAGISel::trySelect(SDNode *Node) { } case ISD::INTRINSIC_VOID: { - const unsigned IntrinsicOpcode = - cast(Node->getOperand(1))->getZExtValue(); + const unsigned IntrinsicOpcode = Node->getConstantOperandVal(1); switch (IntrinsicOpcode) { default: break; diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp index 5c34067c88889..f6ac41ed3479c 100644 --- a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp @@ -1528,7 +1528,7 @@ static SDValue lowerMSABitClearImm(SDValue Op, SelectionDAG &DAG) { SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const { SDLoc DL(Op); - unsigned Intrinsic = cast(Op->getOperand(0))->getZExtValue(); + unsigned 
Intrinsic = Op->getConstantOperandVal(0); switch (Intrinsic) { default: return SDValue(); @@ -2300,7 +2300,7 @@ static SDValue lowerMSALoadIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr, SDValue MipsSETargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const { - unsigned Intr = cast(Op->getOperand(1))->getZExtValue(); + unsigned Intr = Op->getConstantOperandVal(1); switch (Intr) { default: return SDValue(); @@ -2375,7 +2375,7 @@ static SDValue lowerMSAStoreIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr, SDValue MipsSETargetLowering::lowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const { - unsigned Intr = cast(Op->getOperand(1))->getZExtValue(); + unsigned Intr = Op->getConstantOperandVal(1); switch (Intr) { default: return SDValue(); diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp index 894a8636f4585..815c46edb6fa2 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp @@ -513,7 +513,7 @@ void NVPTXDAGToDAGISel::Select(SDNode *N) { } bool NVPTXDAGToDAGISel::tryIntrinsicChain(SDNode *N) { - unsigned IID = cast(N->getOperand(1))->getZExtValue(); + unsigned IID = N->getConstantOperandVal(1); switch (IID) { default: return false; @@ -730,7 +730,7 @@ static bool canLowerToLDG(MemSDNode *N, const NVPTXSubtarget &Subtarget, } bool NVPTXDAGToDAGISel::tryIntrinsicNoChain(SDNode *N) { - unsigned IID = cast(N->getOperand(0))->getZExtValue(); + unsigned IID = N->getConstantOperandVal(0); switch (IID) { default: return false; @@ -1246,7 +1246,7 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) { if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) { Op1 = N->getOperand(2); Mem = cast(N); - unsigned IID = cast(N->getOperand(1))->getZExtValue(); + unsigned IID = N->getConstantOperandVal(1); switch (IID) { default: return false; diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp index 
b57d185bb638b..ed96339240d92 100644 --- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp @@ -4902,8 +4902,7 @@ bool PPCDAGToDAGISel::trySelectLoopCountIntrinsic(SDNode *N) { return false; if (LHS.getOperand(0).getOpcode() != ISD::INTRINSIC_W_CHAIN || - cast(LHS.getOperand(0).getOperand(1))->getZExtValue() != - Intrinsic::loop_decrement) + LHS.getOperand(0).getConstantOperandVal(1) != Intrinsic::loop_decrement) return false; if (!isa(RHS)) @@ -6011,7 +6010,7 @@ void PPCDAGToDAGISel::Select(SDNode *N) { // Op #3 is the Dest MBB // Op #4 is the Flag. // Prevent PPC::PRED_* from being selected into LI. - unsigned PCC = cast(N->getOperand(1))->getZExtValue(); + unsigned PCC = N->getConstantOperandVal(1); if (EnableBranchHint) PCC |= getBranchHint(PCC, *FuncInfo, N->getOperand(3)); diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp index 385b3b74c34d6..8f27e6677afa5 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -2817,8 +2817,8 @@ bool PPCTargetLowering::SelectAddressRegImm( return true; // [r+i] } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { // Match LOAD (ADD (X, Lo(G))). - assert(!cast(N.getOperand(1).getOperand(1))->getZExtValue() - && "Cannot handle constant offsets yet!"); + assert(!N.getOperand(1).getConstantOperandVal(1) && + "Cannot handle constant offsets yet!"); Disp = N.getOperand(1).getOperand(0); // The global address. assert(Disp.getOpcode() == ISD::TargetGlobalAddress || Disp.getOpcode() == ISD::TargetGlobalTLSAddress || @@ -3824,8 +3824,7 @@ SDValue PPCTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const { // Check all operands that may contain the LR. 
for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) { - const InlineAsm::Flag Flags( - cast(Op.getOperand(i))->getZExtValue()); + const InlineAsm::Flag Flags(Op.getConstantOperandVal(i)); unsigned NumVals = Flags.getNumOperandRegisters(); ++i; // Skip the ID value. @@ -10442,8 +10441,7 @@ SDValue PPCTargetLowering::LowerVPERM(SDValue Op, SelectionDAG &DAG, /// information about the intrinsic. static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc, bool &isDot, const PPCSubtarget &Subtarget) { - unsigned IntrinsicID = - cast(Intrin.getOperand(0))->getZExtValue(); + unsigned IntrinsicID = Intrin.getConstantOperandVal(0); CompareOpc = -1; isDot = false; switch (IntrinsicID) { @@ -10728,8 +10726,7 @@ static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc, /// lower, do it, otherwise return null. SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const { - unsigned IntrinsicID = - cast(Op.getOperand(0))->getZExtValue(); + unsigned IntrinsicID = Op.getConstantOperandVal(0); SDLoc dl(Op); @@ -10947,7 +10944,7 @@ SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, // Unpack the result based on how the target uses it. unsigned BitNo; // Bit # of CR6. bool InvertBit; // Invert result? - switch (cast(Op.getOperand(1))->getZExtValue()) { + switch (Op.getConstantOperandVal(1)) { default: // Can't happen, don't crash on invalid number though. case 0: // Return the value of the EQ bit of CR6. BitNo = 0; InvertBit = false; @@ -10983,7 +10980,7 @@ SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op, // the beginning of the argument list. int ArgStart = isa(Op.getOperand(0)) ? 
0 : 1; SDLoc DL(Op); - switch (cast(Op.getOperand(ArgStart))->getZExtValue()) { + switch (Op.getConstantOperandVal(ArgStart)) { case Intrinsic::ppc_cfence: { assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument."); SDValue Val = Op.getOperand(ArgStart + 1); @@ -11548,7 +11545,7 @@ SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { return SDValue(); // Custom lower is only done for high or low doubleword. - int Idx = cast(Op0.getOperand(1))->getZExtValue(); + int Idx = Op0.getConstantOperandVal(1); if (Idx % 2 != 0) return SDValue(); @@ -11717,8 +11714,7 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N, break; } case ISD::INTRINSIC_W_CHAIN: { - if (cast(N->getOperand(1))->getZExtValue() != - Intrinsic::loop_decrement) + if (N->getConstantOperandVal(1) != Intrinsic::loop_decrement) break; assert(N->getValueType(0) == MVT::i1 && @@ -11734,7 +11730,7 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N, break; } case ISD::INTRINSIC_WO_CHAIN: { - switch (cast(N->getOperand(0))->getZExtValue()) { + switch (N->getConstantOperandVal(0)) { case Intrinsic::ppc_pack_longdouble: Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, N->getOperand(2), N->getOperand(1))); @@ -13654,7 +13650,7 @@ static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base, if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) { EVT VT; - switch (cast(N->getOperand(1))->getZExtValue()) { + switch (N->getConstantOperandVal(1)) { default: return false; case Intrinsic::ppc_altivec_lvx: case Intrinsic::ppc_altivec_lvxl: @@ -13682,7 +13678,7 @@ static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base, if (N->getOpcode() == ISD::INTRINSIC_VOID) { EVT VT; - switch (cast(N->getOperand(1))->getZExtValue()) { + switch (N->getConstantOperandVal(1)) { default: return false; case Intrinsic::ppc_altivec_stvx: case Intrinsic::ppc_altivec_stvxl: @@ -15546,8 +15542,7 @@ SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN, } static bool 
isStoreConditional(SDValue Intrin, unsigned &StoreWidth) { - unsigned IntrinsicID = - cast(Intrin.getOperand(1))->getZExtValue(); + unsigned IntrinsicID = Intrin.getConstantOperandVal(1); if (IntrinsicID == Intrinsic::ppc_stdcx) StoreWidth = 8; else if (IntrinsicID == Intrinsic::ppc_stwcx) @@ -15979,7 +15974,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, break; case ISD::INTRINSIC_WO_CHAIN: { bool isLittleEndian = Subtarget.isLittleEndian(); - unsigned IID = cast(N->getOperand(0))->getZExtValue(); + unsigned IID = N->getConstantOperandVal(0); Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr : Intrinsic::ppc_altivec_lvsl); if (IID == Intr && N->getOperand(1)->getOpcode() == ISD::ADD) { @@ -15992,36 +15987,34 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, .zext(Add.getScalarValueSizeInBits()))) { SDNode *BasePtr = Add->getOperand(0).getNode(); for (SDNode *U : BasePtr->uses()) { - if (U->getOpcode() == ISD::INTRINSIC_WO_CHAIN && - cast(U->getOperand(0))->getZExtValue() == IID) { - // We've found another LVSL/LVSR, and this address is an aligned - // multiple of that one. The results will be the same, so use the - // one we've just found instead. + if (U->getOpcode() == ISD::INTRINSIC_WO_CHAIN && + U->getConstantOperandVal(0) == IID) { + // We've found another LVSL/LVSR, and this address is an aligned + // multiple of that one. The results will be the same, so use the + // one we've just found instead. 
- return SDValue(U, 0); - } + return SDValue(U, 0); + } } } if (isa(Add->getOperand(1))) { SDNode *BasePtr = Add->getOperand(0).getNode(); for (SDNode *U : BasePtr->uses()) { - if (U->getOpcode() == ISD::ADD && - isa(U->getOperand(1)) && - (cast(Add->getOperand(1))->getZExtValue() - - cast(U->getOperand(1))->getZExtValue()) % - (1ULL << Bits) == - 0) { - SDNode *OtherAdd = U; - for (SDNode *V : OtherAdd->uses()) { - if (V->getOpcode() == ISD::INTRINSIC_WO_CHAIN && - cast(V->getOperand(0))->getZExtValue() == - IID) { - return SDValue(V, 0); - } + if (U->getOpcode() == ISD::ADD && + isa(U->getOperand(1)) && + (Add->getConstantOperandVal(1) - U->getConstantOperandVal(1)) % + (1ULL << Bits) == + 0) { + SDNode *OtherAdd = U; + for (SDNode *V : OtherAdd->uses()) { + if (V->getOpcode() == ISD::INTRINSIC_WO_CHAIN && + V->getConstantOperandVal(0) == IID) { + return SDValue(V, 0); } } } + } } } @@ -16061,30 +16054,30 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, break; case ISD::INTRINSIC_W_CHAIN: - switch (cast(N->getOperand(1))->getZExtValue()) { - default: - break; - case Intrinsic::ppc_altivec_vsum4sbs: - case Intrinsic::ppc_altivec_vsum4shs: - case Intrinsic::ppc_altivec_vsum4ubs: { - // These sum-across intrinsics only have a chain due to the side effect - // that they may set the SAT bit. If we know the SAT bit will not be set - // for some inputs, we can replace any uses of their chain with the input - // chain. - if (BuildVectorSDNode *BVN = - dyn_cast(N->getOperand(3))) { - APInt APSplatBits, APSplatUndef; - unsigned SplatBitSize; - bool HasAnyUndefs; - bool BVNIsConstantSplat = BVN->isConstantSplat( - APSplatBits, APSplatUndef, SplatBitSize, HasAnyUndefs, 0, - !Subtarget.isLittleEndian()); - // If the constant splat vector is 0, the SAT bit will not be set. 
- if (BVNIsConstantSplat && APSplatBits == 0) - DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), N->getOperand(0)); + switch (N->getConstantOperandVal(1)) { + default: + break; + case Intrinsic::ppc_altivec_vsum4sbs: + case Intrinsic::ppc_altivec_vsum4shs: + case Intrinsic::ppc_altivec_vsum4ubs: { + // These sum-across intrinsics only have a chain due to the side effect + // that they may set the SAT bit. If we know the SAT bit will not be set + // for some inputs, we can replace any uses of their chain with the + // input chain. + if (BuildVectorSDNode *BVN = + dyn_cast(N->getOperand(3))) { + APInt APSplatBits, APSplatUndef; + unsigned SplatBitSize; + bool HasAnyUndefs; + bool BVNIsConstantSplat = BVN->isConstantSplat( + APSplatBits, APSplatUndef, SplatBitSize, HasAnyUndefs, 0, + !Subtarget.isLittleEndian()); + // If the constant splat vector is 0, the SAT bit will not be set. + if (BVNIsConstantSplat && APSplatBits == 0) + DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), N->getOperand(0)); + } + return SDValue(); } - return SDValue(); - } case Intrinsic::ppc_vsx_lxvw4x: case Intrinsic::ppc_vsx_lxvd2x: // For little endian, VSX loads require generating lxvd2x/xxswapd. @@ -16098,7 +16091,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, // For little endian, VSX stores require generating xxswapd/stxvd2x. // Not needed on ISA 3.0 based CPUs since we have a non-permuting store. if (Subtarget.needsSwapsForVSXMemOps()) { - switch (cast(N->getOperand(1))->getZExtValue()) { + switch (N->getConstantOperandVal(1)) { default: break; case Intrinsic::ppc_vsx_stxvw4x: @@ -16327,7 +16320,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, // Unpack the result based on how the target uses it. PPC::Predicate CompOpc; - switch (cast(LHS.getOperand(1))->getZExtValue()) { + switch (LHS.getConstantOperandVal(1)) { default: // Can't happen, don't crash on invalid number though. case 0: // Branch on the value of the EQ bit of CR6. CompOpc = BranchOnWhenPredTrue ? 
PPC::PRED_EQ : PPC::PRED_NE; @@ -16406,7 +16399,7 @@ void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, break; } case ISD::INTRINSIC_WO_CHAIN: { - switch (cast(Op.getOperand(0))->getZExtValue()) { + switch (Op.getConstantOperandVal(0)) { default: break; case Intrinsic::ppc_altivec_vcmpbfp_p: case Intrinsic::ppc_altivec_vcmpeqfp_p: @@ -16433,7 +16426,7 @@ void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, break; } case ISD::INTRINSIC_W_CHAIN: { - switch (cast(Op.getOperand(1))->getZExtValue()) { + switch (Op.getConstantOperandVal(1)) { default: break; case Intrinsic::ppc_load2r: @@ -16868,7 +16861,7 @@ SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op, return SDValue(); SDLoc dl(Op); - unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); + unsigned Depth = Op.getConstantOperandVal(0); // Make sure the function does not optimize away the store of the RA to // the stack. @@ -16901,7 +16894,7 @@ SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op, SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { SDLoc dl(Op); - unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); + unsigned Depth = Op.getConstantOperandVal(0); MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo &MFI = MF.getFrameInfo(); @@ -18086,8 +18079,7 @@ static void computeFlagsForAddressComputation(SDValue N, unsigned &FlagSet, FlagSet |= PPC::MOF_RPlusSImm34; // Signed 34-bit immediates. else FlagSet |= PPC::MOF_RPlusR; // Register. - } else if (RHS.getOpcode() == PPCISD::Lo && - !cast(RHS.getOperand(1))->getZExtValue()) + } else if (RHS.getOpcode() == PPCISD::Lo && !RHS.getConstantOperandVal(1)) FlagSet |= PPC::MOF_RPlusLo; // PPCISD::Lo. 
else FlagSet |= PPC::MOF_RPlusR; @@ -18131,7 +18123,7 @@ unsigned PPCTargetLowering::computeMOFlags(const SDNode *Parent, SDValue N, unsigned ParentOp = Parent->getOpcode(); if (Subtarget.isISA3_1() && ((ParentOp == ISD::INTRINSIC_W_CHAIN) || (ParentOp == ISD::INTRINSIC_VOID))) { - unsigned ID = cast(Parent->getOperand(1))->getZExtValue(); + unsigned ID = Parent->getConstantOperandVal(1); if ((ID == Intrinsic::ppc_vsx_lxvp) || (ID == Intrinsic::ppc_vsx_stxvp)) { SDValue IntrinOp = (ID == Intrinsic::ppc_vsx_lxvp) ? Parent->getOperand(2) diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp index 098a320c91533..bfa3bf3cc74e2 100644 --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -1360,7 +1360,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { if (N0.getOpcode() != ISD::AND || !isa(N0.getOperand(1))) break; - uint64_t C2 = cast(N0.getOperand(1))->getZExtValue(); + uint64_t C2 = N0.getConstantOperandVal(1); // Constant should be a mask. if (!isMask_64(C2)) @@ -1604,7 +1604,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { break; } case ISD::INTRINSIC_W_CHAIN: { - unsigned IntNo = cast(Node->getOperand(1))->getZExtValue(); + unsigned IntNo = Node->getConstantOperandVal(1); switch (IntNo) { // By default we do not custom select any intrinsic. 
default: @@ -1825,7 +1825,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { break; } case ISD::INTRINSIC_VOID: { - unsigned IntNo = cast(Node->getOperand(1))->getZExtValue(); + unsigned IntNo = Node->getConstantOperandVal(1); switch (IntNo) { case Intrinsic::riscv_vsseg2: case Intrinsic::riscv_vsseg3: diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 51580d15451ca..03a59f8a8b57c 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -7235,7 +7235,7 @@ SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op, EVT VT = Op.getValueType(); SDLoc DL(Op); SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT); - unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); + unsigned Depth = Op.getConstantOperandVal(0); while (Depth--) { int Offset = -(XLenInBytes * 2); SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr, @@ -7260,7 +7260,7 @@ SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op, EVT VT = Op.getValueType(); SDLoc DL(Op); - unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); + unsigned Depth = Op.getConstantOperandVal(0); if (Depth) { int Off = -XLenInBytes; SDValue FrameAddr = lowerFRAMEADDR(Op, DAG); @@ -11731,7 +11731,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N, break; } case ISD::INTRINSIC_WO_CHAIN: { - unsigned IntNo = cast(N->getOperand(0))->getZExtValue(); + unsigned IntNo = N->getConstantOperandVal(0); switch (IntNo) { default: llvm_unreachable( @@ -14153,7 +14153,7 @@ static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG, for (SDNode *U : N0->uses()) { if (U->getOpcode() != ISD::SRA || !isa(U->getOperand(1)) || - cast(U->getOperand(1))->getZExtValue() > 32) + U->getConstantOperandVal(1) > 32) return SDValue(); } diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp index 4f08014792110..78bdf3ae9a84b 100644 --- 
a/llvm/lib/Target/Sparc/SparcISelLowering.cpp +++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp @@ -2050,7 +2050,7 @@ static void LookThroughSetCC(SDValue &LHS, SDValue &RHS, LHS.getOperand(3).getOpcode() == SPISD::CMPFCC_V9))) && isOneConstant(LHS.getOperand(0)) && isNullConstant(LHS.getOperand(1))) { SDValue CMPCC = LHS.getOperand(3); - SPCC = cast(LHS.getOperand(2))->getZExtValue(); + SPCC = LHS.getConstantOperandVal(2); LHS = CMPCC.getOperand(0); RHS = CMPCC.getOperand(1); } @@ -3186,7 +3186,7 @@ static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) { SDValue SparcTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const { - unsigned IntNo = cast(Op.getOperand(0))->getZExtValue(); + unsigned IntNo = Op.getConstantOperandVal(0); SDLoc dl(Op); switch (IntNo) { default: return SDValue(); // Don't custom lower most intrinsics. diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp index 559f2ca476d70..045c4c0aac07a 100644 --- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -2186,7 +2186,7 @@ SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, // the mask of valid CC values if so. static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode, unsigned &CCValid) { - unsigned Id = cast(Op.getOperand(1))->getZExtValue(); + unsigned Id = Op.getConstantOperandVal(1); switch (Id) { case Intrinsic::s390_tbegin: Opcode = SystemZISD::TBEGIN; @@ -2212,7 +2212,7 @@ static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode, // CC value as its final argument. Provide the associated SystemZISD // opcode and the mask of valid CC values if so. 
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) { - unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); + unsigned Id = Op.getConstantOperandVal(0); switch (Id) { case Intrinsic::s390_vpkshs: case Intrinsic::s390_vpksfs: @@ -2600,10 +2600,9 @@ static bool shouldSwapCmpOperands(const Comparison &C) { return true; if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND) return true; - if (C.ICmpType != SystemZICMP::SignedOnly && - Opcode0 == ISD::AND && + if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::AND && C.Op0.getOperand(1).getOpcode() == ISD::Constant && - cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff) + C.Op0.getConstantOperandVal(1) == 0xffffffff) return true; return false; @@ -3429,11 +3428,9 @@ SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const { static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) { return (Neg.getOpcode() == ISD::SUB && Neg.getOperand(0).getOpcode() == ISD::Constant && - cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 && - Neg.getOperand(1) == Pos && - (Pos == CmpOp || - (Pos.getOpcode() == ISD::SIGN_EXTEND && - Pos.getOperand(0) == CmpOp))); + Neg.getConstantOperandVal(0) == 0 && Neg.getOperand(1) == Pos && + (Pos == CmpOp || (Pos.getOpcode() == ISD::SIGN_EXTEND && + Pos.getOperand(0) == CmpOp))); } // Return the absolute or negative absolute of Op; IsNegative decides which. @@ -3740,7 +3737,7 @@ SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op, MFI.setFrameAddressIsTaken(true); SDLoc DL(Op); - unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); + unsigned Depth = Op.getConstantOperandVal(0); EVT PtrVT = getPointerTy(DAG.getDataLayout()); // By definition, the frame address is the address of the back chain. 
(In @@ -3776,7 +3773,7 @@ SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op, return SDValue(); SDLoc DL(Op); - unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); + unsigned Depth = Op.getConstantOperandVal(0); EVT PtrVT = getPointerTy(DAG.getDataLayout()); if (Depth > 0) { @@ -4226,7 +4223,7 @@ SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const { if (HighOp.getOpcode() == ISD::AND && HighOp.getOperand(1).getOpcode() == ISD::Constant) { SDValue HighOp0 = HighOp.getOperand(0); - uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue(); + uint64_t Mask = HighOp.getConstantOperandVal(1); if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff)))) HighOp = HighOp0; } @@ -4485,10 +4482,10 @@ SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op, SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const { SDLoc DL(Op); - AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>( - cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()); - SyncScope::ID FenceSSID = static_cast<SyncScope::ID>( - cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue()); + AtomicOrdering FenceOrdering = + static_cast<AtomicOrdering>(Op.getConstantOperandVal(1)); + SyncScope::ID FenceSSID = + static_cast<SyncScope::ID>(Op.getConstantOperandVal(2)); // The only fence that needs an instruction is a sequentially-consistent // cross-thread fence. @@ -4773,13 +4770,13 @@ SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op, SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const { - bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); + bool IsData = Op.getConstantOperandVal(4); if (!IsData) // Just preserve the chain. return Op.getOperand(0); SDLoc DL(Op); - bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); + bool IsWrite = Op.getConstantOperandVal(2); unsigned Code = IsWrite ? 
SystemZ::PFD_WRITE : SystemZ::PFD_READ; auto *Node = cast<MemIntrinsicSDNode>(Op.getNode()); SDValue Ops[] = {Op.getOperand(0), DAG.getTargetConstant(Code, DL, MVT::i32), @@ -4825,7 +4822,7 @@ SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op, SDValue(Node, 0), getCCResult(DAG, SDValue(Node, 1))); } - unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); + unsigned Id = Op.getConstantOperandVal(0); switch (Id) { case Intrinsic::thread_pointer: return lowerThreadPointer(SDLoc(Op), DAG); @@ -5628,7 +5625,7 @@ static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, Op = Op.getOperand(0); if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && Op.getOperand(1).getOpcode() == ISD::Constant) { - unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); + unsigned Elem = Op.getConstantOperandVal(1); if (!GS.add(Op.getOperand(0), Elem)) return SDValue(); FoundOne = true; @@ -6727,8 +6724,7 @@ SDValue SystemZTargetLowering::combineLOAD( int Index = 1; if (User->getOpcode() == ISD::SRL && User->getOperand(1).getOpcode() == ISD::Constant && - cast<ConstantSDNode>(User->getOperand(1))->getZExtValue() == 64 && - User->hasOneUse()) { + User->getConstantOperandVal(1) == 64 && User->hasOneUse()) { User = *User->use_begin(); Index = 0; } @@ -6857,7 +6853,7 @@ static bool isMovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart) { std::swap(Op0, Op1); if (Op1.getOpcode() != ISD::SHL || !Op1.getNode()->hasOneUse() || Op1.getOperand(1).getOpcode() != ISD::Constant || - cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue() != 64) + Op1.getConstantOperandVal(1) != 64) return false; Op1 = Op1.getOperand(0); @@ -7149,20 -7145,18 @@ SDValue SystemZTargetLowering::combineFP_ROUND( unsigned OpNo = N->isStrictFPOpcode() ? 
1 : 0; SelectionDAG &DAG = DCI.DAG; SDValue Op0 = N->getOperand(OpNo); - if (N->getValueType(0) == MVT::f32 && - Op0.hasOneUse() && + if (N->getValueType(0) == MVT::f32 && Op0.hasOneUse() && Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT && Op0.getOperand(0).getValueType() == MVT::v2f64 && Op0.getOperand(1).getOpcode() == ISD::Constant && - cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) { + Op0.getConstantOperandVal(1) == 0) { SDValue Vec = Op0.getOperand(0); for (auto *U : Vec->uses()) { - if (U != Op0.getNode() && - U->hasOneUse() && + if (U != Op0.getNode() && U->hasOneUse() && U->getOpcode() == ISD::EXTRACT_VECTOR_ELT && U->getOperand(0) == Vec && U->getOperand(1).getOpcode() == ISD::Constant && - cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) { + U->getConstantOperandVal(1) == 1) { SDValue OtherRound = SDValue(*U->use_begin(), 0); if (OtherRound.getOpcode() == N->getOpcode() && OtherRound.getOperand(OpNo) == SDValue(U, 0) && @@ -7215,20 +7209,18 @@ SDValue SystemZTargetLowering::combineFP_EXTEND( unsigned OpNo = N->isStrictFPOpcode() ? 
1 : 0; SelectionDAG &DAG = DCI.DAG; SDValue Op0 = N->getOperand(OpNo); - if (N->getValueType(0) == MVT::f64 && - Op0.hasOneUse() && + if (N->getValueType(0) == MVT::f64 && Op0.hasOneUse() && Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT && Op0.getOperand(0).getValueType() == MVT::v4f32 && Op0.getOperand(1).getOpcode() == ISD::Constant && - cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) { + Op0.getConstantOperandVal(1) == 0) { SDValue Vec = Op0.getOperand(0); for (auto *U : Vec->uses()) { - if (U != Op0.getNode() && - U->hasOneUse() && + if (U != Op0.getNode() && U->hasOneUse() && U->getOpcode() == ISD::EXTRACT_VECTOR_ELT && U->getOperand(0) == Vec && U->getOperand(1).getOpcode() == ISD::Constant && - cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 2) { + U->getConstantOperandVal(1) == 2) { SDValue OtherExtend = SDValue(*U->use_begin(), 0); if (OtherExtend.getOpcode() == N->getOpcode() && OtherExtend.getOperand(OpNo) == SDValue(U, 0) && @@ -7605,7 +7597,7 @@ SDValue SystemZTargetLowering::combineINTRINSIC( SDNode *N, DAGCombinerInfo &DCI) const { SelectionDAG &DAG = DCI.DAG; - unsigned Id = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); + unsigned Id = N->getConstantOperandVal(1); switch (Id) { // VECTOR LOAD (RIGHTMOST) WITH LENGTH with a length operand of 15 // or larger is simply a vector load. @@ -7679,7 +7671,7 @@ static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts, APInt SrcDemE; unsigned Opcode = Op.getOpcode(); if (Opcode == ISD::INTRINSIC_WO_CHAIN) { - unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); + unsigned Id = Op.getConstantOperandVal(0); switch (Id) { case Intrinsic::s390_vpksh: // PACKS case Intrinsic::s390_vpksf: @@ -7723,7 +7715,7 @@ static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts, SrcDemE = APInt(NumElts, 0); if (!DemandedElts[OpNo - 1]) break; - unsigned Mask = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); + unsigned Mask = Op.getConstantOperandVal(3); unsigned MaskBit = ((OpNo - 1) ? 
1 : 4); // Demand input element 0 or 1, given by the mask bit value. SrcDemE.setBit((Mask & MaskBit)? 1 : 0); @@ -7732,7 +7724,7 @@ case Intrinsic::s390_vsldb: { // VECTOR SHIFT LEFT DOUBLE BY BYTE assert(VT == MVT::v16i8 && "Unexpected type."); - unsigned FirstIdx = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); + unsigned FirstIdx = Op.getConstantOperandVal(3); assert (FirstIdx > 0 && FirstIdx < 16 && "Unused operand."); unsigned NumSrc0Els = 16 - FirstIdx; SrcDemE = APInt(NumElts, 0); @@ -7808,7 +7800,7 @@ SystemZTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, unsigned Opcode = Op.getOpcode(); if (Opcode == ISD::INTRINSIC_WO_CHAIN) { bool IsLogical = false; - unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); + unsigned Id = Op.getConstantOperandVal(0); switch (Id) { case Intrinsic::s390_vpksh: // PACKS case Intrinsic::s390_vpksf: @@ -7908,7 +7900,7 @@ SystemZTargetLowering::ComputeNumSignBitsForTargetNode( return 1; unsigned Opcode = Op.getOpcode(); if (Opcode == ISD::INTRINSIC_WO_CHAIN) { - unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); + unsigned Id = Op.getConstantOperandVal(0); switch (Id) { case Intrinsic::s390_vpksh: // PACKS case Intrinsic::s390_vpksf: diff --git a/llvm/lib/Target/SystemZ/SystemZOperators.td b/llvm/lib/Target/SystemZ/SystemZOperators.td index af6cf340f8a32..d98bb886c1850 100644 --- a/llvm/lib/Target/SystemZ/SystemZOperators.td +++ b/llvm/lib/Target/SystemZ/SystemZOperators.td @@ -507,11 +507,11 @@ def z_subcarry : PatFrag<(ops node:$lhs, node:$rhs), // Signed and unsigned comparisons. 
def z_scmp : PatFrag<(ops node:$a, node:$b), (z_icmp node:$a, node:$b, timm), [{ - unsigned Type = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); + unsigned Type = N->getConstantOperandVal(2); return Type != SystemZICMP::UnsignedOnly; }]>; def z_ucmp : PatFrag<(ops node:$a, node:$b), (z_icmp node:$a, node:$b, timm), [{ - unsigned Type = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); + unsigned Type = N->getConstantOperandVal(2); return Type != SystemZICMP::SignedOnly; }]>; diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp index 0267aefd1e914..0e41a2d7aa03e 100644 --- a/llvm/lib/Target/VE/VEISelLowering.cpp +++ b/llvm/lib/Target/VE/VEISelLowering.cpp @@ -1101,10 +1101,10 @@ Instruction *VETargetLowering::emitTrailingFence(IRBuilderBase &Builder, SDValue VETargetLowering::lowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const { SDLoc DL(Op); - AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>( - cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()); - SyncScope::ID FenceSSID = static_cast<SyncScope::ID>( - cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue()); + AtomicOrdering FenceOrdering = + static_cast<AtomicOrdering>(Op.getConstantOperandVal(1)); + SyncScope::ID FenceSSID = + static_cast<SyncScope::ID>(Op.getConstantOperandVal(2)); // VE uses Release consistency, so need a fence instruction if it is a // cross-thread fence. @@ -1766,7 +1766,7 @@ static SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG, SDValue VETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const { SDLoc DL(Op); - unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); + unsigned IntNo = Op.getConstantOperandVal(0); switch (IntNo) { default: // Don't custom lower most intrinsics. 
return SDValue(); @@ -2937,8 +2937,8 @@ static bool isI32Insn(const SDNode *User, const SDNode *N) { if (User->getOperand(1).getNode() != N && User->getOperand(2).getNode() != N && isa<ConstantSDNode>(User->getOperand(3))) { - VECC::CondCode VECCVal = static_cast<VECC::CondCode>( - cast<ConstantSDNode>(User->getOperand(3))->getZExtValue()); + VECC::CondCode VECCVal = + static_cast<VECC::CondCode>(User->getConstantOperandVal(3)); return isIntVECondCode(VECCVal); } [[fallthrough]]; diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp index 77a997588c4fe..846eab93e1fea 100644 --- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -5233,7 +5233,7 @@ void X86DAGToDAGISel::Select(SDNode *Node) { break; case X86ISD::VPTERNLOG: { - uint8_t Imm = cast<ConstantSDNode>(Node->getOperand(3))->getZExtValue(); + uint8_t Imm = Node->getConstantOperandVal(3); if (matchVPTERNLOG(Node, Node, Node, Node, Node->getOperand(0), Node->getOperand(1), Node->getOperand(2), Imm)) return; diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index a90ddf132c389..1e4b1361f98a6 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -31758,7 +31758,7 @@ static SDValue LowerCVTPS2PH(SDValue Op, SelectionDAG &DAG) { static SDValue LowerPREFETCH(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) { - unsigned IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); + unsigned IsData = Op.getConstantOperandVal(4); // We don't support non-data prefetch without PREFETCHI. // Just preserve the chain. 
diff --git a/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp b/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp index 1288597fc6b01..05003ec304adc 100644 --- a/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp +++ b/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp @@ -250,7 +250,7 @@ bool XCoreDAGToDAGISel::tryBRIND(SDNode *N) { SDValue Addr = N->getOperand(1); if (Addr->getOpcode() != ISD::INTRINSIC_W_CHAIN) return false; - unsigned IntNo = cast<ConstantSDNode>(Addr->getOperand(1))->getZExtValue(); + unsigned IntNo = Addr->getConstantOperandVal(1); if (IntNo != Intrinsic::xcore_checkevent) return false; SDValue nextAddr = Addr->getOperand(2); diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp index 7736adab19e89..18feeaadb03c8 100644 --- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp +++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp @@ -767,7 +767,7 @@ SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op, // An index of zero corresponds to the current function's frame address. // An index of one to the parent's frame address, and so on. // Depths > 0 not supported yet! - if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0) + if (Op.getConstantOperandVal(0) > 0) return SDValue(); MachineFunction &MF = DAG.getMachineFunction(); @@ -783,7 +783,7 @@ LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const { // An index of zero corresponds to the current function's return address. // An index of one to the parent's return address, and so on. // Depths > 0 not supported yet! 
- if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0) + if (Op.getConstantOperandVal(0) > 0) return SDValue(); MachineFunction &MF = DAG.getMachineFunction(); @@ -905,7 +905,7 @@ LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const { SDValue XCoreTargetLowering:: LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const { SDLoc DL(Op); - unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); + unsigned IntNo = Op.getConstantOperandVal(0); switch (IntNo) { case Intrinsic::xcore_crc8: EVT VT = Op.getValueType(); @@ -1497,7 +1497,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N, switch (N->getOpcode()) { default: break; case ISD::INTRINSIC_VOID: - switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { + switch (N->getConstantOperandVal(1)) { case Intrinsic::xcore_outt: case Intrinsic::xcore_outct: case Intrinsic::xcore_chkct: { @@ -1733,30 +1733,30 @@ void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, break; case ISD::INTRINSIC_W_CHAIN: { - unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); - switch (IntNo) { - case Intrinsic::xcore_getts: - // High bits are known to be zero. - Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(), - Known.getBitWidth() - 16); - break; - case Intrinsic::xcore_int: - case Intrinsic::xcore_inct: - // High bits are known to be zero. - Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(), - Known.getBitWidth() - 8); - break; - case Intrinsic::xcore_testct: - // Result is either 0 or 1. - Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(), - Known.getBitWidth() - 1); - break; - case Intrinsic::xcore_testwct: - // Result is in the range 0 - 4. - Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(), - Known.getBitWidth() - 3); - break; - } + unsigned IntNo = Op.getConstantOperandVal(1); + switch (IntNo) { + case Intrinsic::xcore_getts: + // High bits are known to be zero. 
+ Known.Zero = + APInt::getHighBitsSet(Known.getBitWidth(), Known.getBitWidth() - 16); + break; + case Intrinsic::xcore_int: + case Intrinsic::xcore_inct: + // High bits are known to be zero. + Known.Zero = + APInt::getHighBitsSet(Known.getBitWidth(), Known.getBitWidth() - 8); + break; + case Intrinsic::xcore_testct: + // Result is either 0 or 1. + Known.Zero = + APInt::getHighBitsSet(Known.getBitWidth(), Known.getBitWidth() - 1); + break; + case Intrinsic::xcore_testwct: + // Result is in the range 0 - 4. + Known.Zero = + APInt::getHighBitsSet(Known.getBitWidth(), Known.getBitWidth() - 3); + break; + } } break; }