diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
index 46ffc77d8edd4..c9d25d4250d55 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
@@ -438,7 +438,7 @@ void AMDGPUAtomicOptimizer::optimizeAtomic(Instruction &I,
 
   Type *const Ty = I.getType();
   const unsigned TyBitWidth = DL->getTypeSizeInBits(Ty);
-  Type *const VecTy = VectorType::get(B.getInt32Ty(), 2);
+  auto *const VecTy = FixedVectorType::get(B.getInt32Ty(), 2);
 
   // This is the value in the atomic operation we need to combine in order to
   // reduce the number of atomic operations.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
index 956508e12227b..0ef8586930276 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
@@ -598,7 +598,7 @@ bool AMDGPULibCalls::fold_read_write_pipe(CallInst *CI, IRBuilder<> &B,
   if (Size <= 8)
     PtrElemTy = Type::getIntNTy(Ctx, Size * 8);
   else
-    PtrElemTy = VectorType::get(Type::getInt64Ty(Ctx), Size / 8);
+    PtrElemTy = FixedVectorType::get(Type::getInt64Ty(Ctx), Size / 8);
   unsigned PtrArgLoc = CI->getNumArgOperands() - 3;
   auto PtrArg = CI->getArgOperand(PtrArgLoc);
   unsigned PtrArgAS = PtrArg->getType()->getPointerAddressSpace();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULibFunc.cpp b/llvm/lib/Target/AMDGPU/AMDGPULibFunc.cpp
index b39039861f517..2b5143ba7506c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULibFunc.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULibFunc.cpp
@@ -902,7 +902,7 @@ static Type* getIntrinsicParamType(
     return nullptr;
   }
   if (P.VectorSize > 1)
-    T = VectorType::get(T, P.VectorSize);
+    T = FixedVectorType::get(T, P.VectorSize);
   if (P.PtrKind != AMDGPULibFunc::BYVALUE)
     T = useAddrSpace ? T->getPointerTo((P.PtrKind & AMDGPULibFunc::ADDR_SPACE)
                                        - 1)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
index 52e192e576dda..58bd6e5f3b2b7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
@@ -167,7 +167,7 @@ bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
     }
 
     if (IsV3 && Size >= 32) {
-      V4Ty = VectorType::get(VT->getElementType(), 4);
+      V4Ty = FixedVectorType::get(VT->getElementType(), 4);
       // Use the hack that clang uses to avoid SelectionDAG ruining v3 loads
       AdjustedArgTy = V4Ty;
     }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp
index ab5b62ccf82e0..524a34be876ff 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp
@@ -516,7 +516,7 @@ bool AMDGPUPrintfRuntimeBinding::lowerPrintfForGpu(
           break;
         }
         if (EleCount > 1) {
-          IType = dyn_cast<Type>(VectorType::get(IType, EleCount));
+          IType = FixedVectorType::get(IType, EleCount);
         }
         Arg = new BitCastInst(Arg, IType, "PrintArgVect", Brnch);
         WhatToStore.push_back(Arg);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index eeedfe7a8c029..9e738dd6fdb30 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -334,12 +334,12 @@ Type *GCNTTIImpl::getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
       SrcAddrSpace == AMDGPUAS::REGION_ADDRESS ||
       DestAddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
       DestAddrSpace == AMDGPUAS::REGION_ADDRESS) {
-    return VectorType::get(Type::getInt32Ty(Context), 2);
+    return FixedVectorType::get(Type::getInt32Ty(Context), 2);
   }
 
   // Global memory works best with 16-byte accesses. Private memory will also
   // hit this, although they'll be decomposed.
-  return VectorType::get(Type::getInt32Ty(Context), 4);
+  return FixedVectorType::get(Type::getInt32Ty(Context), 4);
 }
 
 void GCNTTIImpl::getMemcpyLoopResidualLoweringType(
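
For context, a minimal standalone sketch of the API these call sites move to. It is not part of the patch, and it assumes LLVM 11+ headers, where FixedVectorType and ScalableVectorType were split out of VectorType. The old two-argument VectorType::get(Elt, N) always denoted a fixed-width vector; FixedVectorType::get spells that intent explicitly now that scalable (SVE-style) vectors share the VectorType hierarchy.

  #include "llvm/IR/DerivedTypes.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/IR/Type.h"

  using namespace llvm;

  int main() {
    LLVMContext Ctx;
    Type *I32 = Type::getInt32Ty(Ctx);

    // <4 x i32>: a fixed-width vector of four i32 elements, as created by
    // the FixedVectorType::get calls this patch introduces.
    auto *V4I32 = FixedVectorType::get(I32, 4);

    // The scalable counterpart, <vscale x 4 x i32>, now has its own
    // explicit spelling, so neither form is ambiguous at the call site.
    auto *NxV4I32 = ScalableVectorType::get(I32, 4);

    (void)V4I32;
    (void)NxV4I32;
    return 0;
  }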