Clean up usages of asserting vector getters in Type
Summary:
Remove usages of asserting vector getters in Type in preparation for the
VectorType refactor. The existence of these functions complicates the
refactor while adding little value.

Reviewers: sunfish, sdesmalen, efriedma

Reviewed By: efriedma

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D77273
christetreault-llvm committed Apr 9, 2020
1 parent c6f13ce commit b96558f
Showing 8 changed files with 74 additions and 61 deletions.
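The change is mechanical: call sites that previously used Type's asserting vector getters (getVectorNumElements, getVectorElementType, getVectorElementCount, getVectorIsScalable) now perform an explicit cast<VectorType> (or dyn_cast<VectorType> where the type may not be a vector at all) and call the corresponding VectorType accessors. A minimal sketch of the pattern, assuming the LLVM headers as of this commit; the helper name countLanes is hypothetical and not part of the change:

    #include "llvm/IR/DerivedTypes.h"  // llvm::VectorType
    #include "llvm/IR/Type.h"          // llvm::Type
    #include "llvm/Support/Casting.h"  // llvm::cast, llvm::dyn_cast

    using namespace llvm;

    // Before this commit (asserting getter on Type):
    //   unsigned N = Ty->getVectorNumElements();   // asserts Ty->isVectorTy()
    // After this commit (explicit cast at the call site):
    //   unsigned N = cast<VectorType>(Ty)->getNumElements();
    static unsigned countLanes(Type *Ty) {
      // Use dyn_cast when the type might not be a vector; cast<> when it must be.
      if (auto *VTy = dyn_cast<VectorType>(Ty))
        return VTy->getNumElements();
      return 1; // treat scalars as a single lane for this illustration
    }

The hunks below apply this rewrite across the eight changed files.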
6 changes: 4 additions & 2 deletions llvm/include/llvm/Analysis/Utils/Local.h
@@ -63,7 +63,8 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,

// Splat the constant if needed.
if (IntIdxTy->isVectorTy() && !OpC->getType()->isVectorTy())
-OpC = ConstantVector::getSplat(IntIdxTy->getVectorElementCount(), OpC);
+OpC = ConstantVector::getSplat(
+    cast<VectorType>(IntIdxTy)->getElementCount(), OpC);

Constant *Scale = ConstantInt::get(IntIdxTy, Size);
Constant *OC = ConstantExpr::getIntegerCast(OpC, IntIdxTy, true /*SExt*/);
@@ -76,7 +77,8 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,

// Splat the index if needed.
if (IntIdxTy->isVectorTy() && !Op->getType()->isVectorTy())
-Op = Builder->CreateVectorSplat(IntIdxTy->getVectorNumElements(), Op);
+Op = Builder->CreateVectorSplat(
+    cast<VectorType>(IntIdxTy)->getNumElements(), Op);

// Convert to correct type.
if (Op->getType() != IntIdxTy)
17 changes: 9 additions & 8 deletions llvm/lib/Analysis/ConstantFolding.cpp
@@ -155,11 +155,11 @@ Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {

// If the element types match, IR can fold it.
unsigned NumDstElt = DestVTy->getNumElements();
-unsigned NumSrcElt = C->getType()->getVectorNumElements();
+unsigned NumSrcElt = cast<VectorType>(C->getType())->getNumElements();
if (NumDstElt == NumSrcElt)
return ConstantExpr::getBitCast(C, DestTy);

-Type *SrcEltTy = C->getType()->getVectorElementType();
+Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
Type *DstEltTy = DestVTy->getElementType();

// Otherwise, we're changing the number of elements in a vector, which
@@ -218,7 +218,8 @@ Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
for (unsigned j = 0; j != Ratio; ++j) {
Constant *Src = C->getAggregateElement(SrcElt++);
if (Src && isa<UndefValue>(Src))
-Src = Constant::getNullValue(C->getType()->getVectorElementType());
+Src = Constant::getNullValue(
+    cast<VectorType>(C->getType())->getElementType());
else
Src = dyn_cast_or_null<ConstantInt>(Src);
if (!Src) // Reject constantexpr elements.
@@ -469,8 +470,8 @@ bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
NumElts = AT->getNumElements();
EltTy = AT->getElementType();
} else {
-NumElts = C->getType()->getVectorNumElements();
-EltTy = C->getType()->getVectorElementType();
+NumElts = cast<VectorType>(C->getType())->getNumElements();
+EltTy = cast<VectorType>(C->getType())->getElementType();
}
uint64_t EltSize = DL.getTypeAllocSize(EltTy);
uint64_t Index = ByteOffset / EltSize;
@@ -508,7 +509,7 @@ bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
const DataLayout &DL) {
// Bail out early. Not expect to load from scalable global variable.
-if (LoadTy->isVectorTy() && LoadTy->getVectorIsScalable())
+if (LoadTy->isVectorTy() && cast<VectorType>(LoadTy)->isScalable())
return nullptr;

auto *PTy = cast<PointerType>(C->getType());
@@ -836,7 +837,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
Type *ResElemTy = GEP->getResultElementType();
Type *ResTy = GEP->getType();
if (!SrcElemTy->isSized() ||
-(SrcElemTy->isVectorTy() && SrcElemTy->getVectorIsScalable()))
+(SrcElemTy->isVectorTy() && cast<VectorType>(SrcElemTy)->isScalable()))
return nullptr;

if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
@@ -2571,7 +2572,7 @@ static Constant *ConstantFoldVectorCall(StringRef Name,

// Do not iterate on scalable vector. The number of elements is unknown at
// compile-time.
-if (VTy->getVectorIsScalable())
+if (VTy->isScalable())
return nullptr;

if (IntrinsicID == Intrinsic::masked_load) {
32 changes: 17 additions & 15 deletions llvm/lib/Analysis/InstructionSimplify.cpp
@@ -945,8 +945,9 @@ static Value *simplifyDivRem(Value *Op0, Value *Op1, bool IsDiv) {
// If any element of a constant divisor vector is zero or undef, the whole op
// is undef.
auto *Op1C = dyn_cast<Constant>(Op1);
-if (Op1C && Ty->isVectorTy()) {
-unsigned NumElts = Ty->getVectorNumElements();
+auto *VTy = dyn_cast<VectorType>(Ty);
+if (Op1C && VTy) {
+unsigned NumElts = VTy->getNumElements();
for (unsigned i = 0; i != NumElts; ++i) {
Constant *Elt = Op1C->getAggregateElement(i);
if (Elt && (Elt->isNullValue() || isa<UndefValue>(Elt)))
@@ -1221,7 +1222,8 @@ static bool isUndefShift(Value *Amount) {

// If all lanes of a vector shift are undefined the whole shift is.
if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
-for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E; ++I)
+for (unsigned I = 0, E = cast<VectorType>(C->getType())->getNumElements();
+    I != E; ++I)
if (!isUndefShift(C->getAggregateElement(I)))
return false;
return true;
@@ -4011,7 +4013,7 @@ static Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
Constant *TrueC, *FalseC;
if (TrueVal->getType()->isVectorTy() && match(TrueVal, m_Constant(TrueC)) &&
match(FalseVal, m_Constant(FalseC))) {
-unsigned NumElts = TrueC->getType()->getVectorNumElements();
+unsigned NumElts = cast<VectorType>(TrueC->getType())->getNumElements();
SmallVector<Constant *, 16> NewC;
for (unsigned i = 0; i != NumElts; ++i) {
// Bail out on incomplete vector constants.
@@ -4081,7 +4083,7 @@ static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
return UndefValue::get(GEPTy);

bool IsScalableVec =
-SrcTy->isVectorTy() ? SrcTy->getVectorIsScalable() : false;
+isa<VectorType>(SrcTy) && cast<VectorType>(SrcTy)->isScalable();

if (Ops.size() == 2) {
// getelementptr P, 0 -> P.
@@ -4223,8 +4225,8 @@ Value *llvm::SimplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx,

// For fixed-length vector, fold into undef if index is out of bounds.
if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
-if (!Vec->getType()->getVectorIsScalable() &&
-CI->uge(Vec->getType()->getVectorNumElements()))
+if (!cast<VectorType>(Vec->getType())->isScalable() &&
+CI->uge(cast<VectorType>(Vec->getType())->getNumElements()))
return UndefValue::get(Vec->getType());
}

@@ -4280,6 +4282,7 @@ Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
/// If not, this returns null.
static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &,
unsigned) {
+auto *VecVTy = cast<VectorType>(Vec->getType());
if (auto *CVec = dyn_cast<Constant>(Vec)) {
if (auto *CIdx = dyn_cast<Constant>(Idx))
return ConstantFoldExtractElementInstruction(CVec, CIdx);
@@ -4289,24 +4292,23 @@ static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQ
return Splat;

if (isa<UndefValue>(Vec))
-return UndefValue::get(Vec->getType()->getVectorElementType());
+return UndefValue::get(VecVTy->getElementType());
}

// If extracting a specified index from the vector, see if we can recursively
// find a previously computed scalar that was inserted into the vector.
if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
// For fixed-length vector, fold into undef if index is out of bounds.
-if (!Vec->getType()->getVectorIsScalable() &&
-IdxC->getValue().uge(Vec->getType()->getVectorNumElements()))
-return UndefValue::get(Vec->getType()->getVectorElementType());
+if (!VecVTy->isScalable() && IdxC->getValue().uge(VecVTy->getNumElements()))
+return UndefValue::get(VecVTy->getElementType());
if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
return Elt;
}

// An undef extract index can be arbitrarily chosen to be an out-of-range
// index value, which would result in the instruction being undef.
if (isa<UndefValue>(Idx))
-return UndefValue::get(Vec->getType()->getVectorElementType());
+return UndefValue::get(VecVTy->getElementType());

return nullptr;
}
@@ -4403,7 +4405,7 @@ static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
return nullptr;

// The mask value chooses which source operand we need to look at next.
-int InVecNumElts = Op0->getType()->getVectorNumElements();
+int InVecNumElts = cast<VectorType>(Op0->getType())->getNumElements();
int RootElt = MaskVal;
Value *SourceOp = Op0;
if (MaskVal >= InVecNumElts) {
@@ -4446,9 +4448,9 @@ static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1,
if (all_of(Mask, [](int Elem) { return Elem == UndefMaskElem; }))
return UndefValue::get(RetTy);

-Type *InVecTy = Op0->getType();
+auto *InVecTy = cast<VectorType>(Op0->getType());
unsigned MaskNumElts = Mask.size();
-ElementCount InVecEltCount = InVecTy->getVectorElementCount();
+ElementCount InVecEltCount = InVecTy->getElementCount();

bool Scalable = InVecEltCount.Scalable;

3 changes: 2 additions & 1 deletion llvm/lib/Analysis/Loads.cpp
@@ -148,7 +148,8 @@ bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
const DominatorTree *DT) {
// For unsized types or scalable vectors we don't know exactly how many bytes
// are dereferenced, so bail out.
-if (!Ty->isSized() || (Ty->isVectorTy() && Ty->getVectorIsScalable()))
+if (!Ty->isSized() ||
+    (Ty->isVectorTy() && cast<VectorType>(Ty)->isScalable()))
return false;

// When dereferenceability information is provided by a dereferenceable
2 changes: 1 addition & 1 deletion llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -650,7 +650,7 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
return unknown();

if (I.getAllocatedType()->isVectorTy() &&
-I.getAllocatedType()->getVectorIsScalable())
+cast<VectorType>(I.getAllocatedType())->isScalable())
return unknown();

APInt Size(IntTyBits, DL.getTypeAllocSize(I.getAllocatedType()));
10 changes: 5 additions & 5 deletions llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -874,7 +874,7 @@ static bool matchPairwiseShuffleMask(ShuffleVectorInst *SI, bool IsLeft,
else if (!SI)
return false;

-SmallVector<int, 32> Mask(SI->getType()->getVectorNumElements(), -1);
+SmallVector<int, 32> Mask(SI->getType()->getNumElements(), -1);

// Build a mask of 0, 2, ... (left) or 1, 3, ... (right) depending on whether
// we look at the left or right side.
@@ -1036,8 +1036,8 @@ static ReductionKind matchPairwiseReduction(const ExtractElementInst *ReduxRoot,
if (!RD)
return RK_None;

-Type *VecTy = RdxStart->getType();
-unsigned NumVecElems = VecTy->getVectorNumElements();
+auto *VecTy = cast<VectorType>(RdxStart->getType());
+unsigned NumVecElems = VecTy->getNumElements();
if (!isPowerOf2_32(NumVecElems))
return RK_None;

@@ -1101,8 +1101,8 @@ matchVectorSplittingReduction(const ExtractElementInst *ReduxRoot,
return RK_None;

Type *VecTy = ReduxRoot->getOperand(0)->getType();
unsigned NumVecElems = VecTy->getVectorNumElements();
auto *VecTy = cast<VectorType>(ReduxRoot->getOperand(0)->getType());
unsigned NumVecElems = VecTy->getNumElements();
if (!isPowerOf2_32(NumVecElems))
return RK_None;

50 changes: 27 additions & 23 deletions llvm/lib/Analysis/ValueTracking.cpp
@@ -168,11 +168,12 @@ static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
APInt &DemandedLHS, APInt &DemandedRHS) {
// The length of scalable vectors is unknown at compile time, thus we
// cannot check their values
-if (Shuf->getType()->getVectorElementCount().Scalable)
+if (Shuf->getType()->isScalable())
return false;

-int NumElts = Shuf->getOperand(0)->getType()->getVectorNumElements();
-int NumMaskElts = Shuf->getType()->getVectorNumElements();
+int NumElts =
+    cast<VectorType>(Shuf->getOperand(0)->getType())->getNumElements();
+int NumMaskElts = Shuf->getType()->getNumElements();
DemandedLHS = DemandedRHS = APInt::getNullValue(NumElts);
if (DemandedElts.isNullValue())
return true;
@@ -206,9 +207,10 @@ static void computeKnownBits(const Value *V, const APInt &DemandedElts,
static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
const Query &Q) {
Type *Ty = V->getType();
-APInt DemandedElts = Ty->isVectorTy()
-? APInt::getAllOnesValue(Ty->getVectorNumElements())
-: APInt(1, 1);
+APInt DemandedElts =
+    Ty->isVectorTy()
+        ? APInt::getAllOnesValue(cast<VectorType>(Ty)->getNumElements())
+        : APInt(1, 1);
computeKnownBits(V, DemandedElts, Known, Depth, Q);
}

@@ -373,9 +375,10 @@ static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
const Query &Q) {
Type *Ty = V->getType();
-APInt DemandedElts = Ty->isVectorTy()
-? APInt::getAllOnesValue(Ty->getVectorNumElements())
-: APInt(1, 1);
+APInt DemandedElts =
+    Ty->isVectorTy()
+        ? APInt::getAllOnesValue(cast<VectorType>(Ty)->getNumElements())
+        : APInt(1, 1);
return ComputeNumSignBits(V, DemandedElts, Depth, Q);
}

@@ -1791,7 +1794,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
const Value *Vec = I->getOperand(0);
const Value *Idx = I->getOperand(1);
auto *CIdx = dyn_cast<ConstantInt>(Idx);
-unsigned NumElts = Vec->getType()->getVectorNumElements();
+unsigned NumElts = cast<VectorType>(Vec->getType())->getNumElements();
APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
if (CIdx && CIdx->getValue().ult(NumElts))
DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
@@ -1870,8 +1873,8 @@ void computeKnownBits(const Value *V, const APInt &DemandedElts,
Type *Ty = V->getType();
assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
"Not integer or pointer type!");
-assert(((Ty->isVectorTy() &&
-Ty->getVectorNumElements() == DemandedElts.getBitWidth()) ||
+assert(((Ty->isVectorTy() && cast<VectorType>(Ty)->getNumElements() ==
+DemandedElts.getBitWidth()) ||
(!Ty->isVectorTy() && DemandedElts == APInt(1, 1))) &&
"Unexpected vector size");

@@ -2510,7 +2513,7 @@ bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
const Value *Vec = EEI->getVectorOperand();
const Value *Idx = EEI->getIndexOperand();
auto *CIdx = dyn_cast<ConstantInt>(Idx);
-unsigned NumElts = Vec->getType()->getVectorNumElements();
+unsigned NumElts = cast<VectorType>(Vec->getType())->getNumElements();
APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
if (CIdx && CIdx->getValue().ult(NumElts))
DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
@@ -2524,9 +2527,10 @@ bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,

bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
Type *Ty = V->getType();
-APInt DemandedElts = Ty->isVectorTy()
-? APInt::getAllOnesValue(Ty->getVectorNumElements())
-: APInt(1, 1);
+APInt DemandedElts =
+    Ty->isVectorTy()
+        ? APInt::getAllOnesValue(cast<VectorType>(Ty)->getNumElements())
+        : APInt(1, 1);
return isKnownNonZero(V, DemandedElts, Depth, Q);
}

@@ -2627,7 +2631,7 @@ static unsigned computeNumSignBitsVectorConstant(const Value *V,
return 0;

unsigned MinSignBits = TyBits;
-unsigned NumElts = CV->getType()->getVectorNumElements();
+unsigned NumElts = cast<VectorType>(CV->getType())->getNumElements();
for (unsigned i = 0; i != NumElts; ++i) {
if (!DemandedElts[i])
continue;
@@ -2670,8 +2674,8 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
// same behavior for poison though -- that's a FIXME today.

Type *Ty = V->getType();
-assert(((Ty->isVectorTy() &&
-Ty->getVectorNumElements() == DemandedElts.getBitWidth()) ||
+assert(((Ty->isVectorTy() && cast<VectorType>(Ty)->getNumElements() ==
+DemandedElts.getBitWidth()) ||
(!Ty->isVectorTy() && DemandedElts == APInt(1, 1))) &&
"Unexpected vector size");

@@ -3246,8 +3250,8 @@ static bool cannotBeOrderedLessThanZeroImpl(const Value *V,

// Handle vector of constants.
if (auto *CV = dyn_cast<Constant>(V)) {
-if (CV->getType()->isVectorTy()) {
-unsigned NumElts = CV->getType()->getVectorNumElements();
+if (auto *CVVTy = dyn_cast<VectorType>(CV->getType())) {
+unsigned NumElts = CVVTy->getNumElements();
for (unsigned i = 0; i != NumElts; ++i) {
auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
if (!CFP)
@@ -3423,7 +3427,7 @@ bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
return false;

// For vectors, verify that each element is not infinity.
-unsigned NumElts = V->getType()->getVectorNumElements();
+unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
for (unsigned i = 0; i != NumElts; ++i) {
Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
if (!Elt)
@@ -3524,7 +3528,7 @@ bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
return false;

// For vectors, verify that each element is not NaN.
-unsigned NumElts = V->getType()->getVectorNumElements();
+unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
for (unsigned i = 0; i != NumElts; ++i) {
Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
if (!Elt)
