[DA][Delinearization] Move validation logic into Delinearization #169047
base: main
Conversation
This stack of pull requests is managed by Graphite. Learn more about stacking.
@llvm/pr-subscribers-llvm-analysis

Author: Ryotaro Kasuga (kasuga-fj)

Changes

This patch moves the validation logic of delinearization results from DA to Delinearization. It is also called in printDelinearization to test its behavior.
Patch is 28.81 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/169047.diff

19 Files Affected:
diff --git a/llvm/include/llvm/Analysis/Delinearization.h b/llvm/include/llvm/Analysis/Delinearization.h
index 434cfb61699d6..500d62188b161 100644
--- a/llvm/include/llvm/Analysis/Delinearization.h
+++ b/llvm/include/llvm/Analysis/Delinearization.h
@@ -17,6 +17,7 @@
#define LLVM_ANALYSIS_DELINEARIZATION_H
#include "llvm/IR/PassManager.h"
+#include "llvm/IR/Value.h"
namespace llvm {
class raw_ostream;
@@ -141,6 +142,15 @@ bool delinearizeFixedSizeArray(ScalarEvolution &SE, const SCEV *Expr,
SmallVectorImpl<const SCEV *> &Sizes,
const SCEV *ElementSize);
+/// Check that each subscript in \p Subscripts is within the corresponding size
+/// in \p Sizes. For the outermost dimension, the subscript being negative is
+/// allowed. If \p Ptr is not nullptr, it may be used to get information from
+/// the IR pointer value, which may help in the validation.
+bool validateDelinearizationResult(ScalarEvolution &SE,
+ ArrayRef<const SCEV *> Sizes,
+ ArrayRef<const SCEV *> Subscripts,
+ const Value *Ptr = nullptr);
+
/// Gathers the individual index expressions from a GEP instruction.
///
/// This function optimistically assumes the GEP references into a fixed size
diff --git a/llvm/lib/Analysis/Delinearization.cpp b/llvm/lib/Analysis/Delinearization.cpp
index 4064b25d9d4e7..0fe8244a11e85 100644
--- a/llvm/lib/Analysis/Delinearization.cpp
+++ b/llvm/lib/Analysis/Delinearization.cpp
@@ -656,6 +656,108 @@ bool llvm::delinearizeFixedSizeArray(ScalarEvolution &SE, const SCEV *Expr,
return !Subscripts.empty();
}
+static bool isKnownNonNegative(ScalarEvolution *SE, const SCEV *S,
+ const Value *Ptr) {
+ bool Inbounds = false;
+ if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(Ptr))
+ Inbounds = SrcGEP->isInBounds();
+ if (Inbounds) {
+ if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
+ if (AddRec->isAffine()) {
+ // We know S is for Ptr, the operand on a load/store, so doesn't wrap.
+ // If both parts are NonNegative, the end result will be NonNegative
+ if (SE->isKnownNonNegative(AddRec->getStart()) &&
+ SE->isKnownNonNegative(AddRec->getOperand(1)))
+ return true;
+ }
+ }
+ }
+
+ return SE->isKnownNonNegative(S);
+}
+
+/// Compare to see if S is less than Size, using
+///
+/// isKnownNegative(S - Size)
+///
+/// with some extra checking if S is an AddRec and we can prove less-than using
+/// the loop bounds.
+static bool isKnownLessThan(ScalarEvolution *SE, const SCEV *S,
+ const SCEV *Size) {
+ // First unify to the same type
+ auto *SType = dyn_cast<IntegerType>(S->getType());
+ auto *SizeType = dyn_cast<IntegerType>(Size->getType());
+ if (!SType || !SizeType)
+ return false;
+ Type *MaxType =
+ (SType->getBitWidth() >= SizeType->getBitWidth()) ? SType : SizeType;
+ S = SE->getTruncateOrZeroExtend(S, MaxType);
+ Size = SE->getTruncateOrZeroExtend(Size, MaxType);
+
+ auto CollectUpperBound = [&](const Loop *L, Type *T) -> const SCEV * {
+ if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
+ const SCEV *UB = SE->getBackedgeTakenCount(L);
+ return SE->getTruncateOrZeroExtend(UB, T);
+ }
+ return nullptr;
+ };
+
+ auto CheckAddRecBECount = [&]() {
+ const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S);
+ if (!AddRec || !AddRec->isAffine() || !AddRec->hasNoSignedWrap())
+ return false;
+ const SCEV *BECount = CollectUpperBound(AddRec->getLoop(), MaxType);
+ // If the BTC cannot be computed, check the base case for S.
+ if (!BECount || isa<SCEVCouldNotCompute>(BECount))
+ return false;
+ const SCEV *Start = AddRec->getStart();
+ const SCEV *Step = AddRec->getStepRecurrence(*SE);
+ const SCEV *End = AddRec->evaluateAtIteration(BECount, *SE);
+ const SCEV *Diff0 = SE->getMinusSCEV(Start, Size);
+ const SCEV *Diff1 = SE->getMinusSCEV(End, Size);
+
+ // If the value of Step is non-negative and the AddRec is non-wrap, it
+ // reaches its maximum at the last iteration. So it's enouth to check
+ // whether End - Size is negative.
+ if (SE->isKnownNonNegative(Step) && SE->isKnownNegative(Diff1))
+ return true;
+
+ // If the value of Step is non-positive and the AddRec is non-wrap, the
+ // initial value is its maximum.
+ if (SE->isKnownNonPositive(Step) && SE->isKnownNegative(Diff0))
+ return true;
+
+ // Even if we don't know the sign of Step, either Start or End must be
+ // the maximum value of the AddRec since it is non-wrap.
+ if (SE->isKnownNegative(Diff0) && SE->isKnownNegative(Diff1))
+ return true;
+
+ return false;
+ };
+
+ if (CheckAddRecBECount())
+ return true;
+
+ // Check using normal isKnownNegative
+ const SCEV *LimitedBound = SE->getMinusSCEV(S, Size);
+ return SE->isKnownNegative(LimitedBound);
+}
+
+bool llvm::validateDelinearizationResult(ScalarEvolution &SE,
+ ArrayRef<const SCEV *> Sizes,
+ ArrayRef<const SCEV *> Subscripts,
+ const Value *Ptr) {
+ for (size_t I = 1; I < Sizes.size(); ++I) {
+ const SCEV *Size = Sizes[I - 1];
+ const SCEV *Subscript = Subscripts[I];
+ if (!isKnownNonNegative(&SE, Subscript, Ptr))
+ return false;
+ if (!isKnownLessThan(&SE, Subscript, Size))
+ return false;
+ }
+ return true;
+}
+
bool llvm::getIndexExpressionsFromGEP(ScalarEvolution &SE,
const GetElementPtrInst *GEP,
SmallVectorImpl<const SCEV *> &Subscripts,
@@ -804,6 +906,11 @@ void printDelinearization(raw_ostream &O, Function *F, LoopInfo *LI,
for (int i = 0; i < Size; i++)
O << "[" << *Subscripts[i] << "]";
O << "\n";
+
+ bool IsValid = validateDelinearizationResult(
+ *SE, Sizes, Subscripts, getLoadStorePointerOperand(&Inst));
+ O << "Delinearization validation: " << (IsValid ? "Succeeded" : "Failed")
+ << "\n";
}
}
diff --git a/llvm/lib/Analysis/DependenceAnalysis.cpp b/llvm/lib/Analysis/DependenceAnalysis.cpp
index ea261820fb2e6..f366a5d592f30 100644
--- a/llvm/lib/Analysis/DependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/DependenceAnalysis.cpp
@@ -1476,83 +1476,6 @@ bool DependenceInfo::isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *X,
}
}
-/// Compare to see if S is less than Size, using
-///
-/// isKnownNegative(S - Size)
-///
-/// with some extra checking if S is an AddRec and we can prove less-than using
-/// the loop bounds.
-bool DependenceInfo::isKnownLessThan(const SCEV *S, const SCEV *Size) const {
- // First unify to the same type
- auto *SType = dyn_cast<IntegerType>(S->getType());
- auto *SizeType = dyn_cast<IntegerType>(Size->getType());
- if (!SType || !SizeType)
- return false;
- Type *MaxType =
- (SType->getBitWidth() >= SizeType->getBitWidth()) ? SType : SizeType;
- S = SE->getTruncateOrZeroExtend(S, MaxType);
- Size = SE->getTruncateOrZeroExtend(Size, MaxType);
-
- auto CheckAddRecBECount = [&]() {
- const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S);
- if (!AddRec || !AddRec->isAffine() || !AddRec->hasNoSignedWrap())
- return false;
- const SCEV *BECount = collectUpperBound(AddRec->getLoop(), MaxType);
- // If the BTC cannot be computed, check the base case for S.
- if (!BECount || isa<SCEVCouldNotCompute>(BECount))
- return false;
- const SCEV *Start = AddRec->getStart();
- const SCEV *Step = AddRec->getStepRecurrence(*SE);
- const SCEV *End = AddRec->evaluateAtIteration(BECount, *SE);
- const SCEV *Diff0 = SE->getMinusSCEV(Start, Size);
- const SCEV *Diff1 = SE->getMinusSCEV(End, Size);
-
- // If the value of Step is non-negative and the AddRec is non-wrap, it
- // reaches its maximum at the last iteration. So it's enouth to check
- // whether End - Size is negative.
- if (SE->isKnownNonNegative(Step) && SE->isKnownNegative(Diff1))
- return true;
-
- // If the value of Step is non-positive and the AddRec is non-wrap, the
- // initial value is its maximum.
- if (SE->isKnownNonPositive(Step) && SE->isKnownNegative(Diff0))
- return true;
-
- // Even if we don't know the sign of Step, either Start or End must be
- // the maximum value of the AddRec since it is non-wrap.
- if (SE->isKnownNegative(Diff0) && SE->isKnownNegative(Diff1))
- return true;
-
- return false;
- };
-
- if (CheckAddRecBECount())
- return true;
-
- // Check using normal isKnownNegative
- const SCEV *LimitedBound = SE->getMinusSCEV(S, Size);
- return SE->isKnownNegative(LimitedBound);
-}
-
-bool DependenceInfo::isKnownNonNegative(const SCEV *S, const Value *Ptr) const {
- bool Inbounds = false;
- if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(Ptr))
- Inbounds = SrcGEP->isInBounds();
- if (Inbounds) {
- if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
- if (AddRec->isAffine()) {
- // We know S is for Ptr, the operand on a load/store, so doesn't wrap.
- // If both parts are NonNegative, the end result will be NonNegative
- if (SE->isKnownNonNegative(AddRec->getStart()) &&
- SE->isKnownNonNegative(AddRec->getOperand(1)))
- return true;
- }
- }
- }
-
- return SE->isKnownNonNegative(S);
-}
-
// All subscripts are all the same type.
// Loop bound may be smaller (e.g., a char).
// Should zero extend loop bound, since it's always >= 0.
@@ -3788,35 +3711,8 @@ bool DependenceInfo::tryDelinearizeFixedSize(
// iff the subscripts are positive and are less than the range of the
// dimension.
if (!DisableDelinearizationChecks) {
- auto AllIndicesInRange = [&](ArrayRef<const SCEV *> DimensionSizes,
- SmallVectorImpl<const SCEV *> &Subscripts,
- Value *Ptr) {
- size_t SSize = Subscripts.size();
- for (size_t I = 1; I < SSize; ++I) {
- const SCEV *S = Subscripts[I];
- if (!isKnownNonNegative(S, Ptr)) {
- LLVM_DEBUG({
- dbgs() << "Check failed: !isKnownNonNegative(S, Ptr)\n";
- dbgs() << " S: " << *S << "\n" << " Ptr: " << *Ptr << "\n";
- });
- return false;
- }
- const SCEV *Range = DimensionSizes[I - 1];
- if (!isKnownLessThan(S, Range)) {
- LLVM_DEBUG({
- dbgs() << "Check failed: !isKnownLessThan(S, Range)\n";
- dbgs() << " S: " << *S << "\n"
- << " Range: " << *Range << "\n";
- });
- return false;
- }
- }
- return true;
- };
-
- if (!AllIndicesInRange(SrcSizes, SrcSubscripts, SrcPtr) ||
- !AllIndicesInRange(DstSizes, DstSubscripts, DstPtr)) {
- LLVM_DEBUG(dbgs() << "Check failed: AllIndicesInRange.\n");
+ if (!validateDelinearizationResult(*SE, SrcSizes, SrcSubscripts, SrcPtr) ||
+ !validateDelinearizationResult(*SE, DstSizes, DstSubscripts, DstPtr)) {
SrcSubscripts.clear();
DstSubscripts.clear();
return false;
@@ -3874,8 +3770,6 @@ bool DependenceInfo::tryDelinearizeParametricSize(
SrcSubscripts.size() != DstSubscripts.size())
return false;
- size_t Size = SrcSubscripts.size();
-
// Statically check that the array bounds are in-range. The first subscript we
// don't have a size for and it cannot overflow into another subscript, so is
// always safe. The others need to be 0 <= subscript[i] < bound, for both src
@@ -3883,29 +3777,9 @@ bool DependenceInfo::tryDelinearizeParametricSize(
// FIXME: It may be better to record these sizes and add them as constraints
// to the dependency checks.
if (!DisableDelinearizationChecks)
- for (size_t I = 1; I < Size; ++I) {
- bool SNN = isKnownNonNegative(SrcSubscripts[I], SrcPtr);
- bool DNN = isKnownNonNegative(DstSubscripts[I], DstPtr);
- bool SLT = isKnownLessThan(SrcSubscripts[I], Sizes[I - 1]);
- bool DLT = isKnownLessThan(DstSubscripts[I], Sizes[I - 1]);
- if (SNN && DNN && SLT && DLT)
- continue;
-
- LLVM_DEBUG({
- dbgs() << "Delinearization checks failed: can't prove the following\n";
- if (!SNN)
- dbgs() << " isKnownNonNegative(" << *SrcSubscripts[I] << ")\n";
- if (!DNN)
- dbgs() << " isKnownNonNegative(" << *DstSubscripts[I] << ")\n";
- if (!SLT)
- dbgs() << " isKnownLessThan(" << *SrcSubscripts[I] << ", "
- << *Sizes[I - 1] << ")\n";
- if (!DLT)
- dbgs() << " isKnownLessThan(" << *DstSubscripts[I] << ", "
- << *Sizes[I - 1] << ")\n";
- });
+ if (!validateDelinearizationResult(*SE, Sizes, SrcSubscripts, SrcPtr) ||
+ !validateDelinearizationResult(*SE, Sizes, DstSubscripts, DstPtr))
return false;
- }
return true;
}
diff --git a/llvm/test/Analysis/Delinearization/a.ll b/llvm/test/Analysis/Delinearization/a.ll
index 1830a3da77857..5d2d4dc29206e 100644
--- a/llvm/test/Analysis/Delinearization/a.ll
+++ b/llvm/test/Analysis/Delinearization/a.ll
@@ -15,6 +15,7 @@ define void @foo(i64 %n, i64 %m, i64 %o, ptr nocapture %A) #0 {
; CHECK-NEXT: Base offset: %A
; CHECK-NEXT: ArrayDecl[UnknownSize][%m][%o] with elements of 4 bytes.
; CHECK-NEXT: ArrayRef[{3,+,2}<nuw><%for.i>][{-4,+,3}<nw><%for.j>][{7,+,5}<nw><%for.k>]
+; CHECK-NEXT: Delinearization validation: Failed
;
entry:
%cmp32 = icmp sgt i64 %n, 0
diff --git a/llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll b/llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll
index 891d604f5cf13..9e6a4221f8eda 100644
--- a/llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll
+++ b/llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll
@@ -11,12 +11,14 @@ define void @mat_mul(ptr %C, ptr %A, ptr %B, i64 %N) !kernel_arg_addr_space !2 !
; CHECK-NEXT: Base offset: %A
; CHECK-NEXT: ArrayDecl[UnknownSize][%N] with elements of 4 bytes.
; CHECK-NEXT: ArrayRef[%call][{0,+,1}<nuw><nsw><%for.inc>]
+; CHECK-NEXT: Delinearization validation: Succeeded
; CHECK-EMPTY:
; CHECK-NEXT: Inst: %tmp5 = load float, ptr %arrayidx4, align 4
; CHECK-NEXT: AccessFunction: {(4 * %call1),+,(4 * %N)}<%for.inc>
; CHECK-NEXT: Base offset: %B
; CHECK-NEXT: ArrayDecl[UnknownSize][%N] with elements of 4 bytes.
; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.inc>][%call1]
+; CHECK-NEXT: Delinearization validation: Failed
;
entry:
br label %entry.split
diff --git a/llvm/test/Analysis/Delinearization/divide_by_one.ll b/llvm/test/Analysis/Delinearization/divide_by_one.ll
index e812e65ba7fd7..3d8e55984291e 100644
--- a/llvm/test/Analysis/Delinearization/divide_by_one.ll
+++ b/llvm/test/Analysis/Delinearization/divide_by_one.ll
@@ -18,12 +18,14 @@ define void @test(ptr nocapture %dst, i32 %stride, i32 %bs) {
; CHECK-NEXT: Base offset: %dst
; CHECK-NEXT: ArrayDecl[UnknownSize][%stride] with elements of 1 bytes.
; CHECK-NEXT: ArrayRef[{(1 + %bs),+,-1}<nw><%for.cond1.preheader>][{-1,+,1}<nw><%for.body3>]
+; CHECK-NEXT: Delinearization validation: Failed
; CHECK-EMPTY:
; CHECK-NEXT: Inst: store i8 %0, ptr %arrayidx7, align 1
; CHECK-NEXT: AccessFunction: {{\{\{}}(%stride * %bs),+,(-1 * %stride)}<%for.cond1.preheader>,+,1}<nsw><%for.body3>
; CHECK-NEXT: Base offset: %dst
; CHECK-NEXT: ArrayDecl[UnknownSize][%stride] with elements of 1 bytes.
; CHECK-NEXT: ArrayRef[{%bs,+,-1}<nsw><%for.cond1.preheader>][{0,+,1}<nuw><nsw><%for.body3>]
+; CHECK-NEXT: Delinearization validation: Failed
;
entry:
%cmp20 = icmp sgt i32 %bs, -1
diff --git a/llvm/test/Analysis/Delinearization/fixed_size_array.ll b/llvm/test/Analysis/Delinearization/fixed_size_array.ll
index cecd1eacb1437..250d46c81a25b 100644
--- a/llvm/test/Analysis/Delinearization/fixed_size_array.ll
+++ b/llvm/test/Analysis/Delinearization/fixed_size_array.ll
@@ -15,6 +15,7 @@ define void @a_i_j_k(ptr %a) {
; CHECK-NEXT: Base offset: %a
; CHECK-NEXT: ArrayDecl[UnknownSize][8][32] with elements of 4 bytes.
; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{0,+,1}<nuw><nsw><%for.j.header>][{0,+,1}<nuw><nsw><%for.k>]
+; CHECK-NEXT: Delinearization validation: Succeeded
;
entry:
br label %for.i.header
@@ -63,6 +64,7 @@ define void @a_i_nj_k(ptr %a) {
; CHECK-NEXT: Base offset: %a
; CHECK-NEXT: ArrayDecl[UnknownSize][8][32] with elements of 4 bytes.
; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{7,+,-1}<nsw><%for.j.header>][{0,+,1}<nuw><nsw><%for.k>]
+; CHECK-NEXT: Delinearization validation: Succeeded
;
entry:
br label %for.i.header
@@ -118,12 +120,14 @@ define void @a_ijk_b_i2jk(ptr %a, ptr %b) {
; CHECK-NEXT: Base offset: %a
; CHECK-NEXT: ArrayDecl[UnknownSize][4][64] with elements of 4 bytes.
; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{0,+,1}<nuw><nsw><%for.j.header>][{0,+,1}<nuw><nsw><%for.k>]
+; CHECK-NEXT: Delinearization validation: Succeeded
; CHECK-EMPTY:
; CHECK-NEXT: Inst: store i32 1, ptr %b.idx, align 4
; CHECK-NEXT: AccessFunction: {{\{\{\{}}0,+,1024}<nuw><nsw><%for.i.header>,+,256}<nw><%for.j.header>,+,4}<nw><%for.k>
; CHECK-NEXT: Base offset: %b
; CHECK-NEXT: ArrayDecl[UnknownSize][4][64] with elements of 4 bytes.
; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{0,+,1}<nuw><nsw><%for.j.header>][{0,+,1}<nuw><nsw><%for.k>]
+; CHECK-NEXT: Delinearization validation: Succeeded
;
entry:
br label %for.i.header
@@ -180,6 +184,7 @@ define void @a_i_2j1_k(ptr %a) {
; CHECK-NEXT: Base offset: %a
; CHECK-NEXT: ArrayDecl[UnknownSize][4][64] with elements of 4 bytes.
; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{0,+,1}<nuw><%for.j.header>][{32,+,1}<nw><%for.k>]
+; CHECK-NEXT: Delinearization validation: Succeeded
;
entry:
br label %for.i.header
@@ -284,6 +289,7 @@ define void @a_i_j_3k(ptr %a) {
; CHECK-NEXT: Base offset: %a
; CHECK-NEXT: ArrayDecl[UnknownSize][8][32] with elements of 4 bytes.
; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{0,+,1}<nuw><nsw><%for.j.header>][{0,+,3}<nuw><nsw><%for.k>]
+; CHECK-NEXT: Delinearization validation: Succeeded
;
entry:
br label %for.i.header
@@ -386,6 +392,7 @@ define void @a_i_i_jk(ptr %a) {
; CHECK-NEXT: Base offset: %a
; CHECK-NEXT: ArrayDecl[UnknownSize][288] with elements of 4 bytes.
; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{{\{\{}}0,+,1}<nuw><nsw><%for.j.header>,+,1}<nuw><nsw><%for.k>]
+; CHECK-NEXT: Delinearization validation: Succeeded
;
entry:
br label %for.i.header
@@ -436,6 +443,7 @@ define void @a_i_jk_l(ptr %a) {
; CHECK-NEXT: Base offset: %a
; CHECK-NEXT: ArrayDecl[UnknownSize][8][32] with elements of 4 bytes.
; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.i.header>][{{\{\{}}0,+,1}<nuw><nsw><%for.j.header>,+,1}<nuw><nsw><%for.k.header>][{0,+,1}<nuw><nsw><%for.l>]
+; CHECK-NEXT: Delinearization validation: Succeeded
;
entry:
br label %for.i.header
diff --git a/llvm/test/Analysis/Delinearization/himeno_1.ll b/llvm/test/Analysis/Delinearization/himeno_1.ll
index 5ae5d04505b8c..8655a257d8b74 100644
--- a/llvm/test/Analysis/Delinearization/himeno_1.ll
+++ b/llvm/test/Analysis/Delinearization/himeno_1.ll
@@ -36,6 +36,7 @@ define void @jacobi(i32 %nn, ptr nocapture %a, ptr nocapture %p) nounwind uwtabl
; CHECK-NEXT: Base offset: %a.base
; CHECK-NEXT: ArrayDecl[UnknownSize][(sext i32 %a.cols to i64)][(sext i32 %a.deps to i64)] with elements of 4 bytes.
; CHECK-NEXT: ArrayRef[{1,+,1}<nuw><nsw><%for.i>][{1,+,1}<nuw><nsw><%for.j>][{1,+,1}<nuw><nsw><%for.k>]
+; CHECK-NEXT: Delinearization validation: Failed
;
entry:
%p.rows.ptr = getelementptr inbounds %struct.Mat, ptr %p, i64 0, i32 2
diff --git a/llvm/test/Analysis/Delinearization/himeno_2.ll b/llvm/test/Analysis/Delinearization/himeno_2.ll
index 75e4f027c4c6c..21a445eeaf841 100644
--- a/llvm/test/Analysis/Delinearization/himeno_2.ll
+++ b/llvm/test/Analysis/Delinearization/himeno_2.ll
@@ -36,6 +36,7 @@ define void @jacobi(i32 %nn, ptr nocapture %a, ptr nocapture %p) nounwind uwtabl
; CHECK-NEXT: Base offset: %a.base
; CHECK-NEXT: ArrayDecl[UnknownSize][(sext i32 %a.cols to i64)][(sext i32 %a.deps to i64)] wi...
[truncated]
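To make the new "Delinearization validation: Succeeded/Failed" lines easier to interpret, here is a hand-written C++ analogue of two of the tests above. These loop nests are illustrative assumptions, not code from the patch: the first mirrors fixed_size_array.ll, where the inner subscripts provably stay within their dimension sizes, and the second mirrors a.ll, where a subscript such as 3*j - 4 may be negative or exceed the dimension size, so validation fails.

// Illustrative source-level analogue only; the actual tests are LLVM IR files.
void validates_ok(int n, float A[][8][32]) {
  for (int i = 0; i < n; i++)
    for (int j = 0; j < 8; j++)
      for (int k = 0; k < 32; k++)
        A[i][j][k] = 1.0f; // Inner subscripts j and k are known non-negative
                           // and less than 8 and 32, respectively:
                           // "Delinearization validation: Succeeded"
}

void validation_fails(int n, int m, int o, float *A) {
  for (int i = 0; i < n; i++)
    for (int j = 0; j < m; j++)
      for (int k = 0; k < o; k++)
        // The delinearized subscripts (2*i+3, 3*j-4, 5*k+7) cannot be proven
        // to lie within [0, m) and [0, o); e.g. 3*j-4 is -4 at j == 0:
        // "Delinearization validation: Failed"
        A[((2*i + 3) * m + (3*j - 4)) * o + (5*k + 7)] = 1.0f;
}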
🐧 Linux x64 Test Results
This patch moves the validation logic of delinearization results from DA to Delinearization. It is also called in printDelinearization to test its behavior. The motivation is as follows: the same checks are currently implemented in both tryDelinearizeFixedSize and tryDelinearizeParametricSize, and consolidating them in Delinearization avoids code duplication.

This patch changes the test outputs and debug messages, but is otherwise NFCI.
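For reference, a minimal sketch of how a client would use the relocated check. The helper and its variable names below are hypothetical; only delinearize and validateDelinearizationResult come from the header in the diff, and the clear-on-failure behavior mirrors the updated call sites in DependenceAnalysis.cpp.

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/Delinearization.h"
#include "llvm/Analysis/ScalarEvolution.h"

using namespace llvm;

// Hypothetical helper: delinearize an access function and keep the result
// only if the subscripts can be proven to be in range.
static bool delinearizeAndValidate(ScalarEvolution &SE, const SCEV *AccessFn,
                                   const SCEV *ElementSize,
                                   const Value *PtrOperand,
                                   SmallVectorImpl<const SCEV *> &Subscripts,
                                   SmallVectorImpl<const SCEV *> &Sizes) {
  // Recover subscripts and dimension sizes from the flat access function.
  delinearize(SE, AccessFn, Subscripts, Sizes, ElementSize);
  if (Subscripts.empty() || Sizes.empty())
    return false;

  // New with this patch: each inner subscript must be known non-negative and
  // less than the corresponding dimension size; PtrOperand may provide extra
  // inbounds information.
  if (!validateDelinearizationResult(SE, Sizes, Subscripts, PtrOperand)) {
    Subscripts.clear();
    Sizes.clear();
    return false;
  }
  return true;
}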