diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp index a37128b0d745a..6617373f89c8b 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp @@ -2123,7 +2123,7 @@ static bool isPtrKnownNeverNull(const Value *V, const DataLayout &DL, // Pointer cannot be null if it's a block address, GV or alloca. // NOTE: We don't support extern_weak, but if we did, we'd need to check for // it as the symbol could be null in such cases. - if (isa<BlockAddress>(V) || isa<GlobalValue>(V) || isa<AllocaInst>(V)) + if (isa<BlockAddress, GlobalValue, AllocaInst>(V)) return true; // Check nonnull arguments. diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp index fa75c9284c75b..7ec2ee06b811a 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp @@ -103,7 +103,7 @@ static bool canSafelyConvertTo16Bit(Value &V, bool IsFloat) { // Convert a value to 16-bit. static Value *convertTo16Bit(Value &V, InstCombiner::BuilderTy &Builder) { Type *VTy = V.getType(); - if (isa<FPExtInst>(&V) || isa<SExtInst>(&V) || isa<ZExtInst>(&V)) + if (isa<FPExtInst, SExtInst, ZExtInst>(&V)) return cast<Instruction>(&V)->getOperand(0); if (VTy->isIntegerTy()) return Builder.CreateIntCast(&V, Type::getInt16Ty(V.getContext()), false); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp index d41e704a4a11a..627ac6b0063e1 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp @@ -38,8 +38,7 @@ bool AMDGPUInstrInfo::isUniformMMO(const MachineMemOperand *MMO) { // Sometimes LDS instructions have constant pointers. // If Ptr is null, then that means this mem operand contains a // PseudoSourceValue like GOT. 
- if (!Ptr || isa<UndefValue>(Ptr) || - isa<Constant>(Ptr) || isa<GlobalValue>(Ptr)) + if (!Ptr || isa<UndefValue, Constant, GlobalValue>(Ptr)) return true; if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp index 6ef7505ec6f62..2fa03e3964207 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp @@ -2946,8 +2946,7 @@ bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const { // Sometimes LDS instructions have constant pointers. // If Ptr is null, then that means this mem operand contains a // PseudoSourceValue like GOT. - if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) || - isa<Constant>(Ptr) || isa<GlobalValue>(Ptr)) + if (!Ptr || isa<UndefValue, Argument, Constant, GlobalValue>(Ptr)) return true; if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) diff --git a/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp index 9ae043048b932..eab76bb79c969 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp @@ -127,9 +127,7 @@ class LiveRegOptimizer { return LK.first != TargetLoweringBase::TypeLegal; } - bool isOpLegal(Instruction *I) { - return isa<IntrinsicInst>(I) || isa<StoreInst>(I); - } + bool isOpLegal(Instruction *I) { return isa<IntrinsicInst, StoreInst>(I); } bool isCoercionProfitable(Instruction *II) { SmallPtrSet<Instruction *, 4> CVisited; @@ -144,9 +142,8 @@ class LiveRegOptimizer { auto IsLookThru = [](Instruction *II) { if (const auto *Intr = dyn_cast<IntrinsicInst>(II)) return Intr->getIntrinsicID() == Intrinsic::amdgcn_perm; - return isa<PHINode>(II) || isa<ShuffleVectorInst>(II) || - isa<InsertElementInst>(II) || isa<ExtractElementInst>(II) || - isa<CastInst>(II); + return isa<PHINode, ShuffleVectorInst, InsertElementInst, ExtractElementInst, CastInst>(II); }; while (!UserList.empty()) { diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp index a62fc60461193..7163ad2aa7dca 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp @@ -2408,7 
+2408,7 @@ bool AMDGPULowerBufferFatPointers::run(Module &M, const TargetMachine &TM) { for (Function &F : M.functions()) for (Instruction &I : instructions(F)) for (Value *Op : I.operands()) - if (isa<ConstantExpr>(Op) || isa<ConstantAggregate>(Op)) + if (isa<ConstantExpr, ConstantAggregate>(Op)) Worklist.push_back(cast<Constant>(Op)); // Recursively look for any referenced buffer pointer constants. @@ -2421,7 +2421,7 @@ bool AMDGPULowerBufferFatPointers::run(Module &M, const TargetMachine &TM) { if (isBufferFatPtrOrVector(C->getType())) BufferFatPtrConsts.insert(C); for (Value *Op : C->operands()) - if (isa<ConstantExpr>(Op) || isa<ConstantAggregate>(Op)) + if (isa<ConstantExpr, ConstantAggregate>(Op)) Worklist.push_back(cast<Constant>(Op)); } diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp index 9512bcd5c4a13..204d3df546bbf 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp @@ -941,7 +941,7 @@ bool GCNTTIImpl::isSourceOfDivergence(const Value *V) const { // atomic operation refers to the same address in each thread, then each // thread after the first sees the value written by the previous thread as // original value. - if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V)) + if (isa<AtomicRMWInst, AtomicCmpXchgInst>(V)) return true; if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) { diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index 2e3cd5ca6692d..aed5571dfb27c 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -7370,8 +7370,7 @@ SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL, /// not necessary. static bool isKnownNonNull(SDValue Val, SelectionDAG &DAG, const AMDGPUTargetMachine &TM, unsigned AddrSpace) { - if (isa<FrameIndexSDNode>(Val) || isa<GlobalAddressSDNode>(Val) || - isa<BasicBlockSDNode>(Val)) + if (isa<FrameIndexSDNode, GlobalAddressSDNode, BasicBlockSDNode>(Val)) return true; if (auto *ConstVal = dyn_cast<ConstantSDNode>(Val))