diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index b2ffdf949d8b1..e0a54ccb22dd7 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -1069,7 +1069,6 @@ static Optional<Instruction *> instCombineLD1GatherIndex(InstCombiner &IC,
   Value *BasePtr = II.getOperand(1);
   Value *Index = II.getOperand(2);
   Type *Ty = II.getType();
-  Type *BasePtrTy = BasePtr->getType();
   Value *PassThru = ConstantAggregateZero::get(Ty);
 
   // Contiguous gather => masked load.
@@ -1085,8 +1084,8 @@ static Optional<Instruction *> instCombineLD1GatherIndex(InstCombiner &IC,
         BasePtr->getPointerAlignment(II.getModule()->getDataLayout());
 
     Type *VecPtrTy = PointerType::getUnqual(Ty);
-    Value *Ptr = Builder.CreateGEP(BasePtrTy->getPointerElementType(), BasePtr,
-                                   IndexBase);
+    Value *Ptr = Builder.CreateGEP(
+        cast<VectorType>(Ty)->getElementType(), BasePtr, IndexBase);
     Ptr = Builder.CreateBitCast(Ptr, VecPtrTy);
     CallInst *MaskedLoad =
         Builder.CreateMaskedLoad(Ty, Ptr, Alignment, Mask, PassThru);
@@ -1104,10 +1103,9 @@ static Optional<Instruction *> instCombineST1ScatterIndex(InstCombiner &IC,
   Value *BasePtr = II.getOperand(2);
   Value *Index = II.getOperand(3);
   Type *Ty = Val->getType();
-  Type *BasePtrTy = BasePtr->getType();
 
   // Contiguous scatter => masked store.
-  // (sve.ld1.scatter.index Value Mask BasePtr (sve.index IndexBase 1))
+  // (sve.st1.scatter.index Value Mask BasePtr (sve.index IndexBase 1))
   //  => (masked.store Value (gep BasePtr IndexBase) Align Mask)
   Value *IndexBase;
   if (match(Index, m_Intrinsic<Intrinsic::aarch64_sve_index>(
@@ -1118,8 +1116,8 @@ static Optional<Instruction *> instCombineST1ScatterIndex(InstCombiner &IC,
     Align Alignment =
         BasePtr->getPointerAlignment(II.getModule()->getDataLayout());
 
-    Value *Ptr = Builder.CreateGEP(BasePtrTy->getPointerElementType(), BasePtr,
-                                   IndexBase);
+    Value *Ptr = Builder.CreateGEP(
+        cast<VectorType>(Ty)->getElementType(), BasePtr, IndexBase);
     Type *VecPtrTy = PointerType::getUnqual(Ty);
     Ptr = Builder.CreateBitCast(Ptr, VecPtrTy);
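
Illustration (not part of the patch): a minimal standalone sketch of the shape this combine now emits for a contiguous gather, assuming a recent LLVM with opaque pointers, where the bitcast to a vector pointer type is a no-op and is omitted. The <vscale x 4 x i32> result type, the all-true mask, and the function name "f" are illustrative assumptions; the point shown is that the GEP source element type is taken from the gathered vector type rather than from the base pointer's (removed) pointee type.

// Standalone sketch, not LLVM code from the patch: builds the
// gep + masked.load shape that instCombineLD1GatherIndex produces.
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("sketch", Ctx);
  IRBuilder<> Builder(Ctx);

  // Ty stands in for the gathered vector type (II.getType() in the patch).
  auto *Ty = ScalableVectorType::get(Builder.getInt32Ty(), 4);

  // void f(i32* %base, i64 %index_base) -- hypothetical test function.
  auto *FnTy = FunctionType::get(
      Builder.getVoidTy(),
      {PointerType::getUnqual(Builder.getInt32Ty()), Builder.getInt64Ty()},
      /*isVarArg=*/false);
  Function *F = Function::Create(FnTy, Function::ExternalLinkage, "f", M);
  Builder.SetInsertPoint(BasicBlock::Create(Ctx, "entry", F));

  Value *BasePtr = F->getArg(0);
  Value *IndexBase = F->getArg(1);

  // The GEP source element type comes from the vector type, as in the patch.
  Value *Ptr = Builder.CreateGEP(cast<VectorType>(Ty)->getElementType(),
                                 BasePtr, IndexBase);

  // Contiguous gather lowered to a masked load (all-true mask used here).
  Value *Mask =
      ConstantInt::getTrue(ScalableVectorType::get(Builder.getInt1Ty(), 4));
  Value *PassThru = ConstantAggregateZero::get(Ty);
  Builder.CreateMaskedLoad(Ty, Ptr, Align(4), Mask, PassThru);

  Builder.CreateRetVoid();
  M.print(outs(), nullptr);
  return 0;
}

Built against an LLVM install (for example with llvm-config --cxxflags --ldflags --libs core support), the program prints the generated IR, making it easy to compare against the pattern documented in the comments above.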