39 changes: 16 additions & 23 deletions mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp
@@ -44,13 +44,6 @@ struct VL {
bool enableSIMDIndex32;
};

-/// Helper to test for given index value.
-static bool isIntValue(Value val, int64_t idx) {
-if (auto ival = getConstantIntValue(val))
-return *ival == idx;
-return false;
-}

/// Helper test for invariant value (defined outside given block).
static bool isInvariantValue(Value val, Block *block) {
return val.getDefiningOp() && val.getDefiningOp()->getBlock() != block;
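The helper deleted above is subsumed by MLIR's own `isConstantIntValue` utility, which the rewritten loop guard further down now calls directly. A minimal standalone model of the pattern (the `MaybeConst` stand-in is illustrative, not the MLIR API):

```cpp
#include <cstdint>
#include <optional>

// Stand-in for mlir::getConstantIntValue(Value), which yields the
// constant integer behind a value if there is one.
using MaybeConst = std::optional<int64_t>;

// Equality test layered on top, mirroring what isConstantIntValue does.
static bool isConstantIntValue(MaybeConst maybe, int64_t expected) {
  return maybe.has_value() && *maybe == expected;
}
```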
@@ -67,9 +60,9 @@ static VectorType vectorType(VL vl, Type etp) {
return VectorType::get(vl.vectorLength, etp, numScalableDims);
}

-/// Constructs vector type from pointer.
-static VectorType vectorType(VL vl, Value ptr) {
-return vectorType(vl, getMemRefType(ptr).getElementType());
+/// Constructs vector type from a memref value.
+static VectorType vectorType(VL vl, Value mem) {
+return vectorType(vl, getMemRefType(mem).getElementType());
}

/// Constructs vector iteration mask.
@@ -116,35 +109,35 @@ static Value genVectorInvariantValue(PatternRewriter &rewriter, VL vl,
/// that the sparse compiler can only generate indirect loads in
/// the last index, i.e. back().
static Value genVectorLoad(PatternRewriter &rewriter, Location loc, VL vl,
-Value ptr, ArrayRef<Value> idxs, Value vmask) {
-VectorType vtp = vectorType(vl, ptr);
+Value mem, ArrayRef<Value> idxs, Value vmask) {
+VectorType vtp = vectorType(vl, mem);
Value pass = constantZero(rewriter, loc, vtp);
if (idxs.back().getType().isa<VectorType>()) {
SmallVector<Value> scalarArgs(idxs.begin(), idxs.end());
Value indexVec = idxs.back();
scalarArgs.back() = constantIndex(rewriter, loc, 0);
-return rewriter.create<vector::GatherOp>(loc, vtp, ptr, scalarArgs,
+return rewriter.create<vector::GatherOp>(loc, vtp, mem, scalarArgs,
indexVec, vmask, pass);
}
-return rewriter.create<vector::MaskedLoadOp>(loc, vtp, ptr, idxs, vmask,
+return rewriter.create<vector::MaskedLoadOp>(loc, vtp, mem, idxs, vmask,
pass);
}
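As a scalar model of the two branches above: when the last subscript is itself a vector, each active lane reads through the index vector, and inactive lanes receive the pass-through value. This sketch is illustrative only; the names do not correspond to MLIR APIs:

```cpp
#include <cstdint>
#include <vector>

// Scalar semantics of a masked gather with a pass-through value.
static std::vector<double> maskedGather(const std::vector<double> &mem,
                                        const std::vector<uint64_t> &indexVec,
                                        const std::vector<bool> &mask,
                                        double passThru) {
  std::vector<double> out(indexVec.size(), passThru);
  for (size_t lane = 0; lane < indexVec.size(); ++lane)
    if (mask[lane]) // inactive lanes keep the pass-through value
      out[lane] = mem[indexVec[lane]];
  return out;
}
```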

/// Generates a vectorized store a[ind[lo:hi]] = rhs or a[lo:hi] = rhs
/// where 'lo' denotes the current index and 'hi = lo + vl - 1'. Note
/// that the sparse compiler can only generate indirect stores in
/// the last index, i.e. back().
-static void genVectorStore(PatternRewriter &rewriter, Location loc, Value ptr,
+static void genVectorStore(PatternRewriter &rewriter, Location loc, Value mem,
ArrayRef<Value> idxs, Value vmask, Value rhs) {
if (idxs.back().getType().isa<VectorType>()) {
SmallVector<Value> scalarArgs(idxs.begin(), idxs.end());
Value indexVec = idxs.back();
scalarArgs.back() = constantIndex(rewriter, loc, 0);
-rewriter.create<vector::ScatterOp>(loc, ptr, scalarArgs, indexVec, vmask,
+rewriter.create<vector::ScatterOp>(loc, mem, scalarArgs, indexVec, vmask,
rhs);
return;
}
-rewriter.create<vector::MaskedStoreOp>(loc, ptr, idxs, vmask, rhs);
+rewriter.create<vector::MaskedStoreOp>(loc, mem, idxs, vmask, rhs);
}

/// Detects a vectorizable reduction operation and returns the
@@ -233,9 +226,9 @@ static Value genVectorReducInit(PatternRewriter &rewriter, Location loc,
/// See https://llvm.org/docs/GetElementPtr.html for some background on
/// the complications described below.
///
-/// We need to generate a pointer/index load from the sparse storage scheme.
-/// Narrower data types need to be zero extended before casting the value
-/// into the index type used for looping and indexing.
+/// We need to generate a position/coordinate load from the sparse storage
+/// scheme. Narrower data types need to be zero extended before casting
+/// the value into the `index` type used for looping and indexing.
///
/// For the scalar case, subscripts simply zero extend narrower indices
/// into 64-bit values before casting to an index type without a performance
@@ -416,8 +409,8 @@ static bool vectorizeExpr(PatternRewriter &rewriter, scf::ForOp forOp, VL vl,
}
// Proper load operations. These are either values involved in the
// actual computation, such as a[i] = b[i] becomes a[lo:hi] = b[lo:hi],
-// or index values inside the computation that are now fetched from
-// the sparse storage index arrays, such as a[i] = i becomes
+// or coordinate values inside the computation that are now fetched from
+// the sparse storage coordinates arrays, such as a[i] = i becomes
// a[lo:hi] = ind[lo:hi], where 'lo' denotes the current index
// and 'hi = lo + vl - 1'.
if (auto load = dyn_cast<memref::LoadOp>(def)) {
@@ -619,7 +612,7 @@ struct ForOpRewriter : public OpRewritePattern<scf::ForOp> {
// Check for single block, unit-stride for-loop that is generated by
// sparse compiler, which means no data dependence analysis is required,
// and its loop-body is very restricted in form.
-if (!op.getRegion().hasOneBlock() || !isIntValue(op.getStep(), 1) ||
+if (!op.getRegion().hasOneBlock() || !isConstantIntValue(op.getStep(), 1) ||
!op->hasAttr(LoopEmitter::getLoopEmitterLoopAttrName()))
return failure();
// Analyze (!codegen) and rewrite (codegen) loop-body.
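The zero-extension rule described in this file's subscript handling (narrow position/coordinate types widened before use as an `index`) can be modeled in isolation. Widths here are illustrative, not tied to any particular encoding:

```cpp
#include <cstdint>

// A stored 32-bit coordinate is an unsigned quantity even though the
// storage type is signless, so widening must zero extend, never sign
// extend.
static uint64_t loadCoordinate(const int32_t *coords, uint64_t p) {
  return static_cast<uint64_t>(static_cast<uint32_t>(coords[p]));
}
```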
37 changes: 19 additions & 18 deletions mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -450,12 +450,12 @@ static void tryLoosenAffineDenseConstraints(linalg::GenericOp op,
}
}

-/// Computes a topologically sorted iteration graph for the linalg
-/// operation. Ensures all tensors are visited in natural index order. This
-/// is essential for sparse storage formats since these only support access
-/// along fixed dimensions. Even for dense storage formats, however, the
-/// natural index order yields innermost unit-stride access with better
-/// spatial locality.
+/// Computes a topologically sorted iteration graph for the linalg operation.
+/// Ensures all tensors are visited in natural coordinate order. This is
+/// essential for sparse storage formats since these only support access
+/// along fixed levels. Even for dense storage formats, however, the natural
+/// coordinate order yields innermost unit-stride access with better spatial
+/// locality.
static bool computeIterationGraph(CodegenEnv &env, unsigned mask,
OpOperand *skip = nullptr) {
// Set up an n x n from/to adjacency matrix of the iteration graph
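A minimal sketch of the sort performed on that adjacency matrix (Kahn's algorithm; the real implementation additionally threads constraint masks and operand skipping through the traversal):

```cpp
#include <vector>

// Topologically sorts n loop indices given an n x n adjacency matrix;
// returns false if the ordering constraints form a cycle.
static bool topoSort(const std::vector<std::vector<bool>> &adj,
                     std::vector<unsigned> &order) {
  const unsigned n = static_cast<unsigned>(adj.size());
  std::vector<unsigned> inDegree(n, 0);
  for (unsigned i = 0; i < n; ++i)
    for (unsigned j = 0; j < n; ++j)
      if (adj[i][j])
        ++inDegree[j];
  std::vector<unsigned> work;
  for (unsigned i = 0; i < n; ++i)
    if (inDegree[i] == 0)
      work.push_back(i);
  while (!work.empty()) {
    unsigned i = work.back();
    work.pop_back();
    order.push_back(i);
    for (unsigned j = 0; j < n; ++j)
      if (adj[i][j] && --inDegree[j] == 0)
        work.push_back(j);
  }
  return order.size() == n;
}
```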
@@ -605,6 +605,9 @@ static void genBuffers(CodegenEnv &env, OpBuilder &builder) {
}

/// Generates index for load/store on sparse tensor.
+// FIXME: It's not entirely clear what "index" means here (i.e., is it
+// a "coordinate", or "Ldx", or what). So the function should be renamed
+// and/or the documentation expanded in order to clarify.
static Value genIndex(CodegenEnv &env, OpOperand *t) {
auto map = env.op().getMatchingIndexingMap(t);
const auto stt = getSparseTensorType(t->get());
@@ -644,7 +647,7 @@ static Value genInsertionLoad(CodegenEnv &env, OpBuilder &builder,
OpOperand *t) {
linalg::GenericOp op = env.op();
Location loc = op.getLoc();
-// Direct lexicographic index order, tensor loads as zero.
+// Direct lexicographic coordinate order, tensor loads as zero.
if (!env.isExpand()) {
Type tp = getElementTypeOrSelf(t->get().getType());
return constantZero(builder, loc, tp);
@@ -660,7 +663,7 @@ static Value genInsertionLoadReduce(CodegenEnv &env, OpBuilder &builder,
linalg::GenericOp op = env.op();
Location loc = op.getLoc();
Value identity = env.getCustomRedId();
-// Direct lexicographic index order, tensor loads as identity.
+// Direct lexicographic coordinate order, tensor loads as identity.
if (!env.isExpand())
return identity;
// Load from expanded access pattern if filled, identity otherwise.
@@ -677,9 +680,12 @@ static void genInsertionStore(CodegenEnv &env, OpBuilder &builder, OpOperand *t,
Value rhs) {
linalg::GenericOp op = env.op();
Location loc = op.getLoc();
-// Direct insertion in lexicographic index order.
+// Direct insertion in lexicographic coordinate order.
if (!env.isExpand()) {
unsigned rank = op.getRank(t);
+// FIXME: It's not entirely clear what "indices" means here (i.e.,
+// are they "coordinates"? and if so, then are they level-coords or
+// dim-coords?)
SmallVector<Value> indices;
for (unsigned i = 0; i < rank; i++) {
assert(env.emitter().getLoopIV(i));
@@ -822,11 +828,6 @@ inline static Value genInvariantValue(CodegenEnv &env, unsigned exp) {
return env.exp(exp).val;
}

-/// Generates an index value.
-inline static Value genIndexValue(CodegenEnv &env, unsigned idx) {
-return env.getLoopIdxValue(idx);
-}

/// Semi-ring branches are simply inlined by the sparse compiler. Prior
/// analysis has verified that all computations are "local" to the inlined
/// branch or otherwise invariantly defined outside the loop nest, with the
@@ -836,7 +837,7 @@ static Value relinkBranch(CodegenEnv &env, RewriterBase &rewriter, Block *block,
Value e, unsigned ldx) {
if (Operation *def = e.getDefiningOp()) {
if (auto indexOp = dyn_cast<linalg::IndexOp>(def))
-return genIndexValue(env, indexOp.getDim());
+return env.getLoopIdxValue(indexOp.getDim());
if (def->getBlock() == block) {
for (unsigned i = 0, n = def->getNumOperands(); i < n; i++) {
rewriter.updateRootInPlace(def, [&]() {
@@ -862,7 +863,7 @@ static Value genExp(CodegenEnv &env, RewriterBase &rewriter, unsigned exp,
if (env.exp(exp).kind == Kind::kInvariant)
return genInvariantValue(env, exp);
if (env.exp(exp).kind == Kind::kIndex)
-return genIndexValue(env, env.exp(exp).index);
+return env.getLoopIdxValue(env.exp(exp).index);

if (env.exp(exp).kind == Kind::kReduce)
env.startCustomReduc(exp); // enter custom
@@ -1613,8 +1614,8 @@ struct GenericOpSparsifier : public OpRewritePattern<linalg::GenericOp> {
auto dstEnc = SparseTensorEncodingAttr::get(
getContext(), srcEnc.getDimLevelType(),
permute(env, env.op().getMatchingIndexingMap(t)), // new order
-srcEnc.getHigherOrdering(), srcEnc.getPointerBitWidth(),
-srcEnc.getIndexBitWidth());
+srcEnc.getHigherOrdering(), srcEnc.getPosWidth(),
+srcEnc.getCrdWidth());
auto dstTp = RankedTensorType::get(srcTp.getShape(),
srcTp.getElementType(), dstEnc);
auto convert = rewriter.create<ConvertOp>(tval.getLoc(), dstTp, tval);
18 changes: 9 additions & 9 deletions mlir/lib/ExecutionEngine/SparseTensor/NNZ.cpp
@@ -53,26 +53,26 @@ SparseTensorNNZ::SparseTensorNNZ(const std::vector<uint64_t> &lvlSizes,
}
}

-void SparseTensorNNZ::forallIndices(uint64_t stopLvl,
-SparseTensorNNZ::NNZConsumer yield) const {
+void SparseTensorNNZ::forallCoords(uint64_t stopLvl,
+SparseTensorNNZ::NNZConsumer yield) const {
assert(stopLvl < getLvlRank() && "Level out of bounds");
assert(isCompressedDLT(lvlTypes[stopLvl]) &&
"Cannot look up non-compressed levels");
-forallIndices(yield, stopLvl, 0, 0);
+forallCoords(yield, stopLvl, 0, 0);
}

-void SparseTensorNNZ::add(const std::vector<uint64_t> &lvlInd) {
+void SparseTensorNNZ::add(const std::vector<uint64_t> &lvlCoords) {
uint64_t parentPos = 0;
for (uint64_t l = 0, lvlrank = getLvlRank(); l < lvlrank; ++l) {
if (isCompressedDLT(lvlTypes[l]))
nnz[l][parentPos]++;
-parentPos = parentPos * lvlSizes[l] + lvlInd[l];
+parentPos = parentPos * lvlSizes[l] + lvlCoords[l];
}
}

-void SparseTensorNNZ::forallIndices(SparseTensorNNZ::NNZConsumer yield,
-uint64_t stopLvl, uint64_t parentPos,
-uint64_t l) const {
+void SparseTensorNNZ::forallCoords(SparseTensorNNZ::NNZConsumer yield,
+uint64_t stopLvl, uint64_t parentPos,
+uint64_t l) const {
assert(l <= stopLvl);
if (l == stopLvl) {
assert(parentPos < nnz[l].size() && "Cursor is out of range");
@@ -81,6 +81,6 @@ void SparseTensorNNZ::forallIndices(SparseTensorNNZ::NNZConsumer yield,
const uint64_t sz = lvlSizes[l];
const uint64_t pstart = parentPos * sz;
for (uint64_t i = 0; i < sz; ++i)
-forallIndices(yield, stopLvl, pstart + i, l + 1);
+forallCoords(yield, stopLvl, pstart + i, l + 1);
}
}
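For reference, the renamed `add` walks the levels exactly as in this standalone model, where `nnz[l]` must be sized to the product of the level sizes above level `l` (a sketch under those assumptions, not the class itself):

```cpp
#include <cstdint>
#include <vector>

static void addElement(std::vector<std::vector<uint64_t>> &nnz,
                       const std::vector<uint64_t> &lvlSizes,
                       const std::vector<bool> &isCompressed,
                       const std::vector<uint64_t> &lvlCoords) {
  uint64_t parentPos = 0;
  for (uint64_t l = 0, lvlRank = lvlSizes.size(); l < lvlRank; ++l) {
    if (isCompressed[l])
      nnz[l][parentPos]++; // one more stored entry under this parent
    // Row-major linearization of the coordinates of levels 0..l.
    parentPos = parentPos * lvlSizes[l] + lvlCoords[l];
  }
}
```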
20 changes: 10 additions & 10 deletions mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp
@@ -74,19 +74,19 @@ SparseTensorStorageBase::SparseTensorStorageBase( // NOLINT
MLIR_SPARSETENSOR_FOREVERY_V(IMPL_NEWENUMERATOR)
#undef IMPL_NEWENUMERATOR

-#define IMPL_GETPOINTERS(PNAME, P) \
-void SparseTensorStorageBase::getPointers(std::vector<P> **, uint64_t) { \
-FATAL_PIV("getPointers" #PNAME); \
+#define IMPL_GETPOSITIONS(PNAME, P) \
+void SparseTensorStorageBase::getPositions(std::vector<P> **, uint64_t) { \
+FATAL_PIV("getPositions" #PNAME); \
}
-MLIR_SPARSETENSOR_FOREVERY_FIXED_O(IMPL_GETPOINTERS)
-#undef IMPL_GETPOINTERS
+MLIR_SPARSETENSOR_FOREVERY_FIXED_O(IMPL_GETPOSITIONS)
+#undef IMPL_GETPOSITIONS

-#define IMPL_GETINDICES(INAME, I) \
-void SparseTensorStorageBase::getIndices(std::vector<I> **, uint64_t) { \
-FATAL_PIV("getIndices" #INAME); \
+#define IMPL_GETCOORDINATES(CNAME, C) \
+void SparseTensorStorageBase::getCoordinates(std::vector<C> **, uint64_t) { \
+FATAL_PIV("getCoordinates" #CNAME); \
}
-MLIR_SPARSETENSOR_FOREVERY_FIXED_O(IMPL_GETINDICES)
-#undef IMPL_GETINDICES
+MLIR_SPARSETENSOR_FOREVERY_FIXED_O(IMPL_GETCOORDINATES)
+#undef IMPL_GETCOORDINATES

#define IMPL_GETVALUES(VNAME, V) \
void SparseTensorStorageBase::getValues(std::vector<V> **) { \
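The X-macro pattern behind these stubs, modeled standalone (the `MODEL_*` names are illustrative; the real `MLIR_SPARSETENSOR_FOREVERY_FIXED_O` covers more overloads, and `FATAL_PIV` is the library's fatal-error macro):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

// One DO(...) application per (suffix, type) pair.
#define MODEL_FOREVERY_FIXED_O(DO)                                           \
  DO(64, uint64_t)                                                           \
  DO(32, uint32_t)

// Each expansion defines one stub that aborts with a diagnostic, which
// is the role FATAL_PIV plays in the real file.
#define MODEL_GETPOSITIONS(PNAME, P)                                         \
  void getPositions##PNAME(std::vector<P> **, uint64_t) {                    \
    std::fprintf(stderr, "unsupported: getPositions" #PNAME "\n");           \
    std::exit(1);                                                            \
  }
MODEL_FOREVERY_FIXED_O(MODEL_GETPOSITIONS)
#undef MODEL_GETPOSITIONS
```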
259 changes: 134 additions & 125 deletions mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp

Large diffs are not rendered by default.

34 changes: 16 additions & 18 deletions mlir/test/CAPI/sparse_tensor.c
@@ -28,7 +28,7 @@ static int testRoundtripEncoding(MlirContext ctx) {
"dimLevelType = [ \"dense\", \"compressed\", \"compressed\"], "
"dimOrdering = affine_map<(d0, d1, d2) -> (d0, d1, d2)>, "
"higherOrdering = affine_map<(d0, d1)[s0] -> (s0, d0, d1)>, "
"pointerBitWidth = 32, indexBitWidth = 64 }>";
"posWidth = 32, crdWidth = 64 }>";
// clang-format on
MlirAttribute originalAttr =
mlirAttributeParseGet(ctx, mlirStringRefCreateFromCString(originalAsm));
@@ -46,26 +46,24 @@
// CHECK: level_type: 4
// CHECK: level_type: 8
// CHECK: level_type: 8
-int numLevelTypes = mlirSparseTensorEncodingGetNumDimLevelTypes(originalAttr);
+int lvlRank = mlirSparseTensorEncodingGetLvlRank(originalAttr);
enum MlirSparseTensorDimLevelType *levelTypes =
-malloc(sizeof(enum MlirSparseTensorDimLevelType) * numLevelTypes);
-for (int i = 0; i < numLevelTypes; ++i) {
-levelTypes[i] =
-mlirSparseTensorEncodingAttrGetDimLevelType(originalAttr, i);
-fprintf(stderr, "level_type: %d\n", levelTypes[i]);
+malloc(sizeof(enum MlirSparseTensorDimLevelType) * lvlRank);
+for (int l = 0; l < lvlRank; ++l) {
+levelTypes[l] =
+mlirSparseTensorEncodingAttrGetDimLevelType(originalAttr, l);
+fprintf(stderr, "level_type: %d\n", levelTypes[l]);
}
-// CHECK: pointer: 32
-int pointerBitWidth =
-mlirSparseTensorEncodingAttrGetPointerBitWidth(originalAttr);
-fprintf(stderr, "pointer: %d\n", pointerBitWidth);
-// CHECK: index: 64
-int indexBitWidth =
-mlirSparseTensorEncodingAttrGetIndexBitWidth(originalAttr);
-fprintf(stderr, "index: %d\n", indexBitWidth);
+// CHECK: posWidth: 32
+int posWidth = mlirSparseTensorEncodingAttrGetPosWidth(originalAttr);
+fprintf(stderr, "posWidth: %d\n", posWidth);
+// CHECK: crdWidth: 64
+int crdWidth = mlirSparseTensorEncodingAttrGetCrdWidth(originalAttr);
+fprintf(stderr, "crdWidth: %d\n", crdWidth);

-MlirAttribute newAttr = mlirSparseTensorEncodingAttrGet(
-ctx, numLevelTypes, levelTypes, dimOrdering, higherOrdering,
-pointerBitWidth, indexBitWidth);
+MlirAttribute newAttr =
+mlirSparseTensorEncodingAttrGet(ctx, lvlRank, levelTypes, dimOrdering,
+higherOrdering, posWidth, crdWidth);
mlirAttributeDump(newAttr); // For debugging filecheck output.
// CHECK: equal: 1
fprintf(stderr, "equal: %d\n", mlirAttributeEqual(originalAttr, newAttr));
86 changes: 43 additions & 43 deletions mlir/test/Dialect/SparseTensor/codegen.mlir

Large diffs are not rendered by default.

@@ -17,12 +17,12 @@
// CHECK: %[[VAL_9:.*]] = memref.cast %[[VAL_8]] : memref<16xf64> to memref<?xf64>
// CHECK: linalg.fill ins(%[[VAL_2]] : f64) outs(%[[VAL_8]] : memref<16xf64>)
// CHECK: %[[VAL_10:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier
-// CHECK: %[[VAL_12:.*]] = sparse_tensor.storage_specifier.set %[[VAL_10]] dim_sz at 0 with %[[VAL_0]] : !sparse_tensor.storage_specifier
-// CHECK: %[[VAL_14:.*]] = sparse_tensor.storage_specifier.get %[[VAL_12]] ptr_mem_sz at 0 : !sparse_tensor.storage_specifier
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.storage_specifier.set %[[VAL_10]] lvl_sz at 0 with %[[VAL_0]] : !sparse_tensor.storage_specifier
+// CHECK: %[[VAL_14:.*]] = sparse_tensor.storage_specifier.get %[[VAL_12]] pos_mem_sz at 0 : !sparse_tensor.storage_specifier
// CHECK: %[[VAL_15:.*]], %[[VAL_17:.*]] = sparse_tensor.push_back %[[VAL_14]], %[[VAL_5]], %[[VAL_3]] : index, memref<?xindex>, index
-// CHECK: %[[VAL_18:.*]] = sparse_tensor.storage_specifier.set %[[VAL_12]] ptr_mem_sz at 0 with %[[VAL_17]] : !sparse_tensor.storage_specifier
+// CHECK: %[[VAL_18:.*]] = sparse_tensor.storage_specifier.set %[[VAL_12]] pos_mem_sz at 0 with %[[VAL_17]] : !sparse_tensor.storage_specifier
// CHECK: %[[VAL_19:.*]], %[[VAL_21:.*]] = sparse_tensor.push_back %[[VAL_17]], %[[VAL_15]], %[[VAL_3]], %[[VAL_1]] : index, memref<?xindex>, index, index
-// CHECK: %[[VAL_22:.*]] = sparse_tensor.storage_specifier.set %[[VAL_18]] ptr_mem_sz at 0 with %[[VAL_21]] : !sparse_tensor.storage_specifier
+// CHECK: %[[VAL_22:.*]] = sparse_tensor.storage_specifier.set %[[VAL_18]] pos_mem_sz at 0 with %[[VAL_21]] : !sparse_tensor.storage_specifier
// CHECK: return %[[VAL_19]], %[[VAL_7]], %[[VAL_9]], %[[VAL_22]] : memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
func.func @sparse_alloc_sparse_vector(%arg0: index) -> tensor<?xf64, #SV> {
%0 = bufferization.alloc_tensor(%arg0) : tensor<?xf64, #SV>
44 changes: 22 additions & 22 deletions mlir/test/Dialect/SparseTensor/conversion.mlir
@@ -6,14 +6,14 @@

#SparseVector64 = #sparse_tensor.encoding<{
dimLevelType = ["compressed"],
-pointerBitWidth = 64,
-indexBitWidth = 64
+posWidth = 64,
+crdWidth = 64
}>

#SparseVector32 = #sparse_tensor.encoding<{
dimLevelType = ["compressed"],
-pointerBitWidth = 32,
-indexBitWidth = 32
+posWidth = 32,
+crdWidth = 32
}>

#CSR = #sparse_tensor.encoding<{
@@ -174,63 +174,63 @@ func.func @sparse_nop_cast(%arg0: tensor<64xf32, #SparseVector>) -> tensor<?xf32
return %0 : tensor<?xf32, #SparseVector>
}

-// CHECK-LABEL: func @sparse_pointers(
+// CHECK-LABEL: func @sparse_positions(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
-// CHECK: %[[T:.*]] = call @sparsePointers0(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK: %[[T:.*]] = call @sparsePositions0(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: return %[[T]] : memref<?xindex>
-func.func @sparse_pointers(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
-%0 = sparse_tensor.pointers %arg0 { dimension = 0 : index } : tensor<128xf64, #SparseVector> to memref<?xindex>
+func.func @sparse_positions(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
+%0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<128xf64, #SparseVector> to memref<?xindex>
return %0 : memref<?xindex>
}

-// CHECK-LABEL: func @sparse_pointers64(
+// CHECK-LABEL: func @sparse_positions64(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
-// CHECK: %[[T:.*]] = call @sparsePointers64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi64>
+// CHECK: %[[T:.*]] = call @sparsePositions64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi64>
// CHECK: return %[[T]] : memref<?xi64>
-func.func @sparse_pointers64(%arg0: tensor<128xf64, #SparseVector64>) -> memref<?xi64> {
-%0 = sparse_tensor.pointers %arg0 { dimension = 0 : index } : tensor<128xf64, #SparseVector64> to memref<?xi64>
+func.func @sparse_positions64(%arg0: tensor<128xf64, #SparseVector64>) -> memref<?xi64> {
+%0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<128xf64, #SparseVector64> to memref<?xi64>
return %0 : memref<?xi64>
}

-// CHECK-LABEL: func @sparse_pointers32(
+// CHECK-LABEL: func @sparse_positions32(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
-// CHECK: %[[T:.*]] = call @sparsePointers32(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi32>
+// CHECK: %[[T:.*]] = call @sparsePositions32(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi32>
// CHECK: return %[[T]] : memref<?xi32>
-func.func @sparse_pointers32(%arg0: tensor<128xf64, #SparseVector32>) -> memref<?xi32> {
-%0 = sparse_tensor.pointers %arg0 { dimension = 0 : index } : tensor<128xf64, #SparseVector32> to memref<?xi32>
+func.func @sparse_positions32(%arg0: tensor<128xf64, #SparseVector32>) -> memref<?xi32> {
+%0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<128xf64, #SparseVector32> to memref<?xi32>
return %0 : memref<?xi32>
}

// CHECK-LABEL: func @sparse_indices(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
-// CHECK: %[[T:.*]] = call @sparseIndices0(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK: %[[T:.*]] = call @sparseCoordinates0(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: return %[[T]] : memref<?xindex>
func.func @sparse_indices(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
-%0 = sparse_tensor.indices %arg0 { dimension = 0 : index } : tensor<128xf64, #SparseVector> to memref<?xindex>
+%0 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<128xf64, #SparseVector> to memref<?xindex>
return %0 : memref<?xindex>
}

// CHECK-LABEL: func @sparse_indices64(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
-// CHECK: %[[T:.*]] = call @sparseIndices64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi64>
+// CHECK: %[[T:.*]] = call @sparseCoordinates64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi64>
// CHECK: return %[[T]] : memref<?xi64>
func.func @sparse_indices64(%arg0: tensor<128xf64, #SparseVector64>) -> memref<?xi64> {
-%0 = sparse_tensor.indices %arg0 { dimension = 0 : index } : tensor<128xf64, #SparseVector64> to memref<?xi64>
+%0 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<128xf64, #SparseVector64> to memref<?xi64>
return %0 : memref<?xi64>
}

// CHECK-LABEL: func @sparse_indices32(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
-// CHECK: %[[T:.*]] = call @sparseIndices32(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi32>
+// CHECK: %[[T:.*]] = call @sparseCoordinates32(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi32>
// CHECK: return %[[T]] : memref<?xi32>
func.func @sparse_indices32(%arg0: tensor<128xf64, #SparseVector32>) -> memref<?xi32> {
-%0 = sparse_tensor.indices %arg0 { dimension = 0 : index } : tensor<128xf64, #SparseVector32> to memref<?xi32>
+%0 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<128xf64, #SparseVector32> to memref<?xi32>
return %0 : memref<?xi32>
}

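The two buffers exercised above, in a standalone model of a compressed 1-D vector. The widths are illustrative, matching a posWidth = 32, crdWidth = 64 encoding; positions (formerly "pointers") delimit the stored range per parent, and coordinates (formerly "indices") hold each entry's coordinate:

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // Compressed storage of the vector [0, 5, 0, 0, 7, 0, 0, 8].
  std::vector<uint32_t> positions = {0, 3};      // posWidth = 32
  std::vector<uint64_t> coordinates = {1, 4, 7}; // crdWidth = 64
  std::vector<double> values = {5.0, 7.0, 8.0};
  // positions[0..1] delimit the stored entries of the single level.
  for (uint32_t p = positions[0]; p < positions[1]; ++p)
    std::printf("a[%llu] = %g\n",
                static_cast<unsigned long long>(coordinates[p]), values[p]);
  return 0;
}
```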
6 changes: 3 additions & 3 deletions mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir
@@ -192,10 +192,10 @@ func.func @sparse_constant() -> tensor<8x7xf32, #CSR>{
// CHECK-RWT: sparse_tensor.yield %[[L0T2]]
// CHECK-RWT: }
// CHECK-RWT: %[[COO:.*]] = sparse_tensor.load %[[T1]] hasInserts
-// CHECK-RWT: %[[NNZ:.*]] = sparse_tensor.number_of_entries %[[COO]]
+// CHECK-RWT: %[[NSE:.*]] = sparse_tensor.number_of_entries %[[COO]]
// CHECK-RWT: %[[V:.*]] = sparse_tensor.values %[[COO]]
-// CHECK-RWT: %[[I:.*]] = sparse_tensor.indices_buffer %[[COO]]
-// CHECK-RWT: sparse_tensor.sort_coo hybrid_quick_sort %[[NNZ]], %[[I]] jointly %[[V]] {nx = 2 : index, ny = 0 : index}
+// CHECK-RWT: %[[I:.*]] = sparse_tensor.coordinates_buffer %[[COO]]
+// CHECK-RWT: sparse_tensor.sort_coo hybrid_quick_sort %[[NSE]], %[[I]] jointly %[[V]] {nx = 2 : index, ny = 0 : index}
// CHECK-RWT: %[[T3:.*]] = bufferization.alloc_tensor()
// CHECK-RWT: %[[T4:.*]] = sparse_tensor.foreach in %[[COO]] init(%[[T3]])
// CHECK-RWT: ^bb0(%[[L1I0:.*]]: index, %[[L1I1:.*]]: index, %[[L1V:.*]]: f32, %[[L1T:.*]]: tensor
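A standalone model of the `sort_coo` step checked above: `nx` leading coordinates per entry live in one flattened buffer, sorted lexicographically, with the values permuted jointly. This is a sketch; the real op also supports trailing `ny` coordinates and several sort strategies:

```cpp
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

static void sortCoo(std::vector<uint64_t> &crd, std::vector<float> &val,
                    uint64_t nse, uint64_t nx) {
  // Sort a permutation rather than the buffers themselves.
  std::vector<uint64_t> perm(nse);
  std::iota(perm.begin(), perm.end(), 0);
  std::sort(perm.begin(), perm.end(), [&](uint64_t a, uint64_t b) {
    return std::lexicographical_compare(
        crd.begin() + a * nx, crd.begin() + (a + 1) * nx,
        crd.begin() + b * nx, crd.begin() + (b + 1) * nx);
  });
  // Apply the permutation to coordinates and values jointly.
  std::vector<uint64_t> sortedCrd(crd.size());
  std::vector<float> sortedVal(val.size());
  for (uint64_t i = 0; i < nse; ++i) {
    std::copy(crd.begin() + perm[i] * nx, crd.begin() + (perm[i] + 1) * nx,
              sortedCrd.begin() + i * nx);
    sortedVal[i] = val[perm[i]];
  }
  crd = std::move(sortedCrd);
  val = std::move(sortedVal);
}
```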
16 changes: 8 additions & 8 deletions mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
@@ -11,14 +11,14 @@

#SparseVector64 = #sparse_tensor.encoding<{
dimLevelType = ["compressed"],
-pointerBitWidth = 64,
-indexBitWidth = 64
+posWidth = 64,
+crdWidth = 64
}>

#SparseVector32 = #sparse_tensor.encoding<{
dimLevelType = ["compressed"],
-pointerBitWidth = 32,
-indexBitWidth = 32
+posWidth = 32,
+crdWidth = 32
}>

#SparseVector = #sparse_tensor.encoding<{
@@ -107,14 +107,14 @@ func.func @sparse_convert(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32

#SparseSingleton64 = #sparse_tensor.encoding<{
dimLevelType = ["singleton"],
-pointerBitWidth = 64,
-indexBitWidth = 64
+posWidth = 64,
+crdWidth = 64
}>

#SparseSingleton32 = #sparse_tensor.encoding<{
dimLevelType = ["singleton"],
-pointerBitWidth = 32,
-indexBitWidth = 32
+posWidth = 32,
+crdWidth = 32
}>

// CHECK-COO-LABEL: func @sparse_convert_singleton(
@@ -2,14 +2,14 @@

#SparseVector64 = #sparse_tensor.encoding<{
dimLevelType = ["compressed"],
-pointerBitWidth = 64,
-indexBitWidth = 64
+posWidth = 64,
+crdWidth = 64
}>

#SparseVector32 = #sparse_tensor.encoding<{
dimLevelType = ["compressed"],
-pointerBitWidth = 32,
-indexBitWidth = 32
+posWidth = 32,
+crdWidth = 32
}>


14 changes: 7 additions & 7 deletions mlir/test/Dialect/SparseTensor/fold.mlir
@@ -22,13 +22,13 @@ func.func @sparse_dce_convert(%arg0: tensor<64xf32>) {

// CHECK-LABEL: func @sparse_dce_getters(
// CHECK-SAME: %[[A:.*]]: tensor<64xf32, #sparse_tensor.encoding<{{{.*}}}>>)
-// CHECK-NOT: sparse_tensor.pointers
-// CHECK-NOT: sparse_tensor.indices
+// CHECK-NOT: sparse_tensor.positions
+// CHECK-NOT: sparse_tensor.coordinates
// CHECK-NOT: sparse_tensor.values
// CHECK: return
func.func @sparse_dce_getters(%arg0: tensor<64xf32, #SparseVector>) {
-%0 = sparse_tensor.pointers %arg0 { dimension = 0 : index } : tensor<64xf32, #SparseVector> to memref<?xindex>
-%1 = sparse_tensor.indices %arg0 { dimension = 0 : index } : tensor<64xf32, #SparseVector> to memref<?xindex>
+%0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<64xf32, #SparseVector> to memref<?xindex>
+%1 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<64xf32, #SparseVector> to memref<?xindex>
%2 = sparse_tensor.values %arg0 : tensor<64xf32, #SparseVector> to memref<?xf32>
return
}
@@ -54,11 +54,11 @@ func.func @sparse_concat_dce(%arg0: tensor<2xf64, #SparseVector>,
// CHECK-NOT: sparse_tensor.storage_specifier.get
// CHECK: return %[[A1]]
func.func @sparse_get_specifier_dce_fold(%arg0: !sparse_tensor.storage_specifier<#SparseVector>, %arg1: index, %arg2: index) -> index {
-%0 = sparse_tensor.storage_specifier.set %arg0 dim_sz at 0 with %arg1
+%0 = sparse_tensor.storage_specifier.set %arg0 lvl_sz at 0 with %arg1
: !sparse_tensor.storage_specifier<#SparseVector>
-%1 = sparse_tensor.storage_specifier.set %0 ptr_mem_sz at 0 with %arg2
+%1 = sparse_tensor.storage_specifier.set %0 pos_mem_sz at 0 with %arg2
: !sparse_tensor.storage_specifier<#SparseVector>
-%2 = sparse_tensor.storage_specifier.get %1 dim_sz at 0
+%2 = sparse_tensor.storage_specifier.get %1 lvl_sz at 0
: !sparse_tensor.storage_specifier<#SparseVector>
return %2 : index
}
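The fold in the last test is plain value forwarding: a `get` of a field that was just `set` yields the `set` operand, so the whole specifier chain above is dead. A scalar model of that reasoning (field names are illustrative):

```cpp
#include <cassert>

struct SpecifierModel {
  int lvlSz = 0;    // models lvl_sz at 0
  int posMemSz = 0; // models pos_mem_sz at 0
};

int main() {
  int arg1 = 10, arg2 = 3;
  SpecifierModel s;
  s.lvlSz = arg1;          // set lvl_sz at 0 with %arg1
  s.posMemSz = arg2;       // set pos_mem_sz at 0 with %arg2
  assert(s.lvlSz == arg1); // get lvl_sz at 0 folds to %arg1
  return 0;
}
```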
148 changes: 74 additions & 74 deletions mlir/test/Dialect/SparseTensor/invalid.mlir

Large diffs are not rendered by default.

8 changes: 4 additions & 4 deletions mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
@@ -42,22 +42,22 @@ func.func private @tensor_no_permutation(%arg0: tensor<16x32xf32, #a>) -> ()

// -----

#a = #sparse_tensor.encoding<{pointerBitWidth = "x"}> // expected-error {{expected an integral pointer bitwidth}}
#a = #sparse_tensor.encoding<{posWidth = "x"}> // expected-error {{expected an integral position bitwidth}}
func.func private @tensor_no_int_ptr(%arg0: tensor<16x32xf32, #a>) -> ()

// -----

-#a = #sparse_tensor.encoding<{pointerBitWidth = 42}> // expected-error {{unexpected pointer bitwidth: 42}}
+#a = #sparse_tensor.encoding<{posWidth = 42}> // expected-error {{unexpected position bitwidth: 42}}
func.func private @tensor_invalid_int_ptr(%arg0: tensor<16x32xf32, #a>) -> ()

// -----

#a = #sparse_tensor.encoding<{indexBitWidth = "not really"}> // expected-error {{expected an integral index bitwidth}}
#a = #sparse_tensor.encoding<{crdWidth = "not really"}> // expected-error {{expected an integral index bitwidth}}
func.func private @tensor_no_int_index(%arg0: tensor<16x32xf32, #a>) -> ()

// -----

-#a = #sparse_tensor.encoding<{indexBitWidth = 128}> // expected-error {{unexpected index bitwidth: 128}}
+#a = #sparse_tensor.encoding<{crdWidth = 128}> // expected-error {{unexpected coordinate bitwidth: 128}}
func.func private @tensor_invalid_int_index(%arg0: tensor<16x32xf32, #a>) -> ()

// -----
28 changes: 14 additions & 14 deletions mlir/test/Dialect/SparseTensor/roundtrip.mlir
@@ -13,7 +13,7 @@ func.func @sparse_new(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {

// -----

-#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], indexBitWidth=32}>
+#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}>

// CHECK-LABEL: func @sparse_pack(
// CHECK-SAME: %[[D:.*]]: tensor<6xf64>,
@@ -29,7 +29,7 @@ func.func @sparse_pack(%data: tensor<6xf64>, %index: tensor<6x1xi32>)

// -----

-#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], indexBitWidth=32}>
+#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}>

// CHECK-LABEL: func @sparse_unpack(
// CHECK-SAME: %[[T:.*]]: tensor<100xf64, #
@@ -85,12 +85,12 @@ func.func @sparse_convert_3d_from_sparse(%arg0: tensor<8x8x8xf64, #SparseTensor>

#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>

-// CHECK-LABEL: func @sparse_pointers(
+// CHECK-LABEL: func @sparse_positions(
// CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}>)
-// CHECK: %[[T:.*]] = sparse_tensor.pointers %[[A]] {dimension = 0 : index} : tensor<128xf64, #{{.*}}> to memref<?xindex>
+// CHECK: %[[T:.*]] = sparse_tensor.positions %[[A]] {level = 0 : index} : tensor<128xf64, #{{.*}}> to memref<?xindex>
// CHECK: return %[[T]] : memref<?xindex>
-func.func @sparse_pointers(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
-%0 = sparse_tensor.pointers %arg0 {dimension = 0 : index} : tensor<128xf64, #SparseVector> to memref<?xindex>
+func.func @sparse_positions(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
+%0 = sparse_tensor.positions %arg0 {level = 0 : index} : tensor<128xf64, #SparseVector> to memref<?xindex>
return %0 : memref<?xindex>
}

Expand All @@ -100,10 +100,10 @@ func.func @sparse_pointers(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xin

// CHECK-LABEL: func @sparse_indices_buffer(
// CHECK-SAME: %[[A:.*]]: tensor<?x?xf64, #{{.*}}>)
-// CHECK: %[[T:.*]] = sparse_tensor.indices_buffer %[[A]] : tensor<?x?xf64, #{{.*}}> to memref<?xindex>
+// CHECK: %[[T:.*]] = sparse_tensor.coordinates_buffer %[[A]] : tensor<?x?xf64, #{{.*}}> to memref<?xindex>
// CHECK: return %[[T]] : memref<?xindex>
func.func @sparse_indices_buffer(%arg0: tensor<?x?xf64, #COO>) -> memref<?xindex> {
-%0 = sparse_tensor.indices_buffer %arg0 : tensor<?x?xf64, #COO> to memref<?xindex>
+%0 = sparse_tensor.coordinates_buffer %arg0 : tensor<?x?xf64, #COO> to memref<?xindex>
return %0 : memref<?xindex>
}

Expand All @@ -113,10 +113,10 @@ func.func @sparse_indices_buffer(%arg0: tensor<?x?xf64, #COO>) -> memref<?xindex

// CHECK-LABEL: func @sparse_indices(
// CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}>)
-// CHECK: %[[T:.*]] = sparse_tensor.indices %[[A]] {dimension = 0 : index} : tensor<128xf64, #{{.*}}> to memref<?xindex>
+// CHECK: %[[T:.*]] = sparse_tensor.coordinates %[[A]] {level = 0 : index} : tensor<128xf64, #{{.*}}> to memref<?xindex>
// CHECK: return %[[T]] : memref<?xindex>
func.func @sparse_indices(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
-%0 = sparse_tensor.indices %arg0 {dimension = 0 : index} : tensor<128xf64, #SparseVector> to memref<?xindex>
+%0 = sparse_tensor.coordinates %arg0 {level = 0 : index} : tensor<128xf64, #SparseVector> to memref<?xindex>
return %0 : memref<?xindex>
}

@@ -183,10 +183,10 @@ func.func @sparse_metadata_init() -> !sparse_tensor.storage_specifier<#SparseVec

// CHECK-LABEL: func @sparse_get_md(
// CHECK-SAME: %[[A:.*]]: !sparse_tensor.storage_specifier<#{{.*}}>
-// CHECK: %[[T:.*]] = sparse_tensor.storage_specifier.get %[[A]] dim_sz at 0
+// CHECK: %[[T:.*]] = sparse_tensor.storage_specifier.get %[[A]] lvl_sz at 0
// CHECK: return %[[T]] : index
func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>) -> index {
-%0 = sparse_tensor.storage_specifier.get %arg0 dim_sz at 0
+%0 = sparse_tensor.storage_specifier.get %arg0 lvl_sz at 0
: !sparse_tensor.storage_specifier<#SparseVector>
return %0 : index
}
@@ -198,11 +198,11 @@ func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>)
// CHECK-LABEL: func @sparse_set_md(
// CHECK-SAME: %[[A:.*]]: !sparse_tensor.storage_specifier<#{{.*}}>,
// CHECK-SAME: %[[I:.*]]: index)
-// CHECK: %[[T:.*]] = sparse_tensor.storage_specifier.set %[[A]] dim_sz at 0 with %[[I]]
+// CHECK: %[[T:.*]] = sparse_tensor.storage_specifier.set %[[A]] lvl_sz at 0 with %[[I]]
// CHECK: return %[[T]] : !sparse_tensor.storage_specifier<#{{.*}}>
func.func @sparse_set_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>, %arg1: index)
-> !sparse_tensor.storage_specifier<#SparseVector> {
-%0 = sparse_tensor.storage_specifier.set %arg0 dim_sz at 0 with %arg1
+%0 = sparse_tensor.storage_specifier.set %arg0 lvl_sz at 0 with %arg1
: !sparse_tensor.storage_specifier<#SparseVector>
return %0 : !sparse_tensor.storage_specifier<#SparseVector>
}
16 changes: 8 additions & 8 deletions mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
@@ -9,21 +9,21 @@ func.func private @sparse_1d_tensor(tensor<32xf64, #sparse_tensor.encoding<{ dim
#CSR = #sparse_tensor.encoding<{
dimLevelType = [ "dense", "compressed" ],
dimOrdering = affine_map<(i,j) -> (i,j)>,
-pointerBitWidth = 64,
-indexBitWidth = 64
+posWidth = 64,
+crdWidth = 64
}>

// CHECK-LABEL: func private @sparse_csr(
// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], pointerBitWidth = 64, indexBitWidth = 64 }>>)
// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], posWidth = 64, crdWidth = 64 }>>)
func.func private @sparse_csr(tensor<?x?xf32, #CSR>)

// -----

#CSC = #sparse_tensor.encoding<{
dimLevelType = [ "dense", "compressed" ],
dimOrdering = affine_map<(i,j) -> (j,i)>,
-pointerBitWidth = 0,
-indexBitWidth = 0
+posWidth = 0,
+crdWidth = 0
}>

// CHECK-LABEL: func private @sparse_csc(
Expand All @@ -35,12 +35,12 @@ func.func private @sparse_csc(tensor<?x?xf32, #CSC>)
#DCSC = #sparse_tensor.encoding<{
dimLevelType = [ "compressed", "compressed" ],
dimOrdering = affine_map<(i,j) -> (j,i)>,
-pointerBitWidth = 0,
-indexBitWidth = 64
+posWidth = 0,
+crdWidth = 64
}>

// CHECK-LABEL: func private @sparse_dcsc(
// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, indexBitWidth = 64 }>>)
// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, crdWidth = 64 }>>)
func.func private @sparse_dcsc(tensor<?x?xf32, #DCSC>)

// -----
20 changes: 10 additions & 10 deletions mlir/test/Dialect/SparseTensor/sorted_coo.mlir
@@ -41,7 +41,7 @@
// CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 2.000000e+00 : f32
-// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
+// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xf32>
// CHECK: %[[VAL_6:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_1]]] : memref<?xindex>
// CHECK: %[[VAL_7:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
@@ -70,9 +70,9 @@ func.func @sparse_scale(%argx: tensor<?x?xf32, #SortedCOO>) -> tensor<?x?xf32, #
// CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
-// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
-// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
+// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xf64>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
@@ -112,13 +112,13 @@ func.func @matvec(%arga: tensor<32x64xf64, #SortedCOO>,
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0.000000e+00 : f64
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
-// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
-// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
+// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xf64>
-// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
-// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
-// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
+// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xf64>
// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x64xf64>
// CHECK-DAG: linalg.fill ins(%[[VAL_3]] : f64) outs(%[[VAL_14]] : memref<32x64xf64>)
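A scalar model of the matvec kernel checked above over sorted COO storage: one positions buffer for level 0, two coordinate streams (row and column), and the values, all walked in lockstep. The generated code additionally groups entries with equal rows for the scalarized reduction; this sketch computes the same sum:

```cpp
#include <cstdint>
#include <vector>

// x(i) += A(i,j) * b(j) over sorted COO storage of A.
static void matvec(const std::vector<uint64_t> &pos0,
                   const std::vector<uint64_t> &crd0, // row per entry
                   const std::vector<uint64_t> &crd1, // column per entry
                   const std::vector<double> &val,
                   const std::vector<double> &b, std::vector<double> &x) {
  for (uint64_t p = pos0[0]; p < pos0[1]; ++p)
    x[crd0[p]] += val[p] * b[crd1[p]];
}
```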
98 changes: 49 additions & 49 deletions mlir/test/Dialect/SparseTensor/sparse_1d.mlir

Large diffs are not rendered by default.

114 changes: 57 additions & 57 deletions mlir/test/Dialect/SparseTensor/sparse_2d.mlir

Large diffs are not rendered by default.

106 changes: 53 additions & 53 deletions mlir/test/Dialect/SparseTensor/sparse_3d.mlir

Large diffs are not rendered by default.

50 changes: 25 additions & 25 deletions mlir/test/Dialect/SparseTensor/sparse_affine.mlir
@@ -23,8 +23,8 @@
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 3 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<4xf32>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf32>
@@ -64,10 +64,10 @@ func.func @mul_inv_dense1d(%arga: tensor<32xf32, #SpVec>,
// CHECK: %[[VAL_4:.*]] = arith.constant 3 : index
// CHECK: %[[VAL_5:.*]] = arith.constant 0.000000e+00 : f32
// CHECK: %[[VAL_6:.*]] = bufferization.alloc_tensor() : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf32>
-// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf32>
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_3]]] : memref<?xindex>
@@ -162,8 +162,8 @@ func.func @mul_inv_enc_dense1d(%arga: tensor<32xf32, #EncDenseVec>,
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 2 : index
-// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<34xi32>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi32>
@@ -201,11 +201,11 @@ func.func @and_affine_dense1d(%arga: tensor<32xi32, #SpVec>,
// CHECK: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_4:.*]] = arith.constant 2 : index
// CHECK: %[[VAL_5:.*]] = bufferization.alloc_tensor() : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xi32>
-// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<34xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<34xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<34xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<34xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<34xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xi32>
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]]] : memref<?xindex>
@@ -264,8 +264,8 @@ func.func @and_affine_sparse1d(%arga: tensor<32xi32, #SpVec>,
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 3 : index
-// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<34x19xf64>
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf64>
@@ -315,11 +315,11 @@ func.func @mul_affine_dense2d(%arga: tensor<32x16xf64, #CSR>,
// CHECK-DAG: %[[VAL_TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[VAL_FALSE:.*]] = arith.constant false
// CHECK: %[[VAL_8:.*]] = bufferization.alloc_tensor() : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
-// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor<34x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor<34x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<34x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<34x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<34x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
// CHECK: %[[VAL_15:.*]] = scf.for %[[VAL_16:.*]] = %[[VAL_3]] to %[[VAL_2]] step %[[VAL_4]] iter_args(%[[VAL_17:.*]] = %[[VAL_8]]) -> (tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>) {
// CHECK: %[[VAL_18:.*]] = arith.addi %[[VAL_16]], %[[VAL_5]] : index
@@ -391,11 +391,11 @@ func.func @mul_affine_sparse2d(%arga: tensor<32x16xf64, #CSR>,
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 3 : index
-// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<34x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<34x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<34x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<34x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<34x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
-// CHECK: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
// CHECK: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf64>
// CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_4]]] : memref<?xindex>
@@ -455,11 +455,11 @@ func.func @mul_affine_dense_dim_2d(%arga: tensor<34x16xf64, #CSR>,
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 3 : index
-// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<34x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<34x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<34x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<34x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<34x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
-// CHECK: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
// CHECK: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf64>
// CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_5]]] : memref<?xindex>
8 changes: 4 additions & 4 deletions mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir
@@ -17,10 +17,10 @@
// CHECK-DAG: %[[TMP_c0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[TMP_c1:.*]] = arith.constant 1 : index
// CHECK: %[[TMP_0:.*]] = bufferization.alloc_tensor()
// CHECK: %[[TMP_1:.*]] = sparse_tensor.pointers %[[TMP_arg0]] {dimension = 0 : index}
// CHECK: %[[TMP_2:.*]] = sparse_tensor.indices %[[TMP_arg0]] {dimension = 0 : index}
// CHECK: %[[TMP_3:.*]] = sparse_tensor.pointers %[[TMP_arg0]] {dimension = 1 : index}
// CHECK: %[[TMP_4:.*]] = sparse_tensor.indices %[[TMP_arg0]] {dimension = 1 : index}
// CHECK: %[[TMP_1:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 0 : index}
// CHECK: %[[TMP_2:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 0 : index}
// CHECK: %[[TMP_3:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 1 : index}
// CHECK: %[[TMP_4:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 1 : index}
// CHECK: %[[TMP_5:.*]] = sparse_tensor.values %[[TMP_arg0]]
// CHECK: %[[TMP_6:.*]] = memref.load %[[TMP_1]][%[[TMP_c0]]] : memref<?xindex>
// CHECK: %[[TMP_7:.*]] = memref.load %[[TMP_1]][%[[TMP_c1]]] : memref<?xindex>
122 changes: 61 additions & 61 deletions mlir/test/Dialect/SparseTensor/sparse_concat_codegen.mlir


16 changes: 8 additions & 8 deletions mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir
@@ -41,15 +41,15 @@
// CHECK: %[[VAL_25:.*]] = memref.cast %[[VAL_24]] : memref<300xindex> to memref<?xindex>
// CHECK: linalg.fill ins(%[[F0]] : f64) outs(%[[VAL_20]] : memref<300xf64>)
// CHECK: linalg.fill ins(%[[False]] : i1) outs(%[[VAL_22]] : memref<300xi1>)
// CHECK: %[[VAL_26:.*]] = call @sparsePointers0(%[[Arg0]], %[[I0]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: %[[VAL_27:.*]] = call @sparseIndices0(%[[Arg0]], %[[I0]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: %[[VAL_28:.*]] = call @sparsePointers0(%[[Arg0]], %[[I1]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: %[[VAL_29:.*]] = call @sparseIndices0(%[[Arg0]], %[[I1]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: %[[VAL_26:.*]] = call @sparsePositions0(%[[Arg0]], %[[I0]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: %[[VAL_27:.*]] = call @sparseCoordinates0(%[[Arg0]], %[[I0]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: %[[VAL_28:.*]] = call @sparsePositions0(%[[Arg0]], %[[I1]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: %[[VAL_29:.*]] = call @sparseCoordinates0(%[[Arg0]], %[[I1]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: %[[VAL_30:.*]] = call @sparseValuesF64(%[[Arg0]]) : (!llvm.ptr<i8>) -> memref<?xf64>
// CHECK: %[[VAL_31:.*]] = call @sparsePointers0(%[[Arg1]], %[[I0]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: %[[VAL_32:.*]] = call @sparseIndices0(%[[Arg1]], %[[I0]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: %[[VAL_33:.*]] = call @sparsePointers0(%[[Arg1]], %[[I1]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: %[[VAL_34:.*]] = call @sparseIndices0(%[[Arg1]], %[[I1]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: %[[VAL_31:.*]] = call @sparsePositions0(%[[Arg1]], %[[I0]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: %[[VAL_32:.*]] = call @sparseCoordinates0(%[[Arg1]], %[[I0]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: %[[VAL_33:.*]] = call @sparsePositions0(%[[Arg1]], %[[I1]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: %[[VAL_34:.*]] = call @sparseCoordinates0(%[[Arg1]], %[[I1]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: %[[VAL_35:.*]] = call @sparseValuesF64(%[[Arg1]]) : (!llvm.ptr<i8>) -> memref<?xf64>
// CHECK: %[[VAL_36:.*]] = memref.load %[[VAL_26]]{{\[}}%[[I0]]] : memref<?xindex>
// CHECK: %[[VAL_37:.*]] = memref.load %[[VAL_26]]{{\[}}%[[I1]]] : memref<?xindex>
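
At the runtime-library level the same rename shows up in the support functions: @sparsePointers0/@sparseIndices0 become @sparsePositions0/@sparseCoordinates0. A sketch of one call pair as it appears after lowering, with the signatures copied from the CHECK lines above (%tensor and %lvl are illustrative names; the !llvm.ptr<i8> is the opaque sparse storage handle):

%pos = call @sparsePositions0(%tensor, %lvl) : (!llvm.ptr<i8>, index) -> memref<?xindex>
%crd = call @sparseCoordinates0(%tensor, %lvl) : (!llvm.ptr<i8>, index) -> memref<?xindex>
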
40 changes: 20 additions & 20 deletions mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
@@ -35,8 +35,8 @@
// CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
@@ -67,8 +67,8 @@ func.func @abs(%arga: tensor<32xf64, #SV>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
@@ -99,8 +99,8 @@ func.func @ceil(%arga: tensor<32xf64, #SV>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
@@ -131,8 +131,8 @@ func.func @floor(%arga: tensor<32xf64, #SV>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
@@ -166,8 +166,8 @@ func.func @neg(%arga: tensor<32xf64, #SV>,
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant true
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
@@ -226,8 +226,8 @@ func.func @add(%arga: tensor<32xf64, #SV>,
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant true
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
// CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
@@ -286,8 +286,8 @@ func.func @sub(%arga: tensor<32xf64, #SV>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
@@ -322,8 +322,8 @@ func.func @mul(%arga: tensor<32xf64, #SV>,
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2.000000e+00 : f64
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
@@ -355,8 +355,8 @@ func.func @divbyc(%arga: tensor<32xf64, #SV>,
// CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_3:.*]] = bufferization.alloc_tensor() : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>
// CHECK: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
// CHECK: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xf64>
// CHECK: %[[VAL_7:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_1]]] : memref<?xindex>
// CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
@@ -403,8 +403,8 @@ func.func @zero_preserving_math(%arga: tensor<32xf64, #SV>) -> tensor<32xf64, #S
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_3:.*]] = complex.constant [0.000000e+00, 1.000000e+00] : complex<f64>
// CHECK: %[[VAL_4:.*]] = bufferization.alloc_tensor() : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xcomplex<f64>>
// CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_1]]] : memref<?xindex>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_2]]] : memref<?xindex>
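
The CHECK sequences in these tests all verify the same canonical traversal of a compressed level: load the position pair that brackets the stored entries, then walk coordinates and values in lockstep. A minimal sketch, assuming the 1-D #SV vector used above and pre-materialized %c0/%c1 index constants:

%pos = sparse_tensor.positions %a {level = 0 : index} : tensor<32xf64, #SV> to memref<?xindex>
%crd = sparse_tensor.coordinates %a {level = 0 : index} : tensor<32xf64, #SV> to memref<?xindex>
%val = sparse_tensor.values %a : tensor<32xf64, #SV> to memref<?xf64>
%lo = memref.load %pos[%c0] : memref<?xindex>
%hi = memref.load %pos[%c1] : memref<?xindex>
scf.for %i = %lo to %hi step %c1 {
  %j = memref.load %crd[%i] : memref<?xindex>  // coordinate of the i-th stored entry
  %v = memref.load %val[%i] : memref<?xf64>    // its value
  // ... apply the pointwise operation to %v at coordinate %j ...
}
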
8 changes: 4 additions & 4 deletions mlir/test/Dialect/SparseTensor/sparse_index.mlir
@@ -76,10 +76,10 @@ func.func @dense_index(%arga: tensor<?x?xi64, #DenseMatrix>)
// CHECK-DAG: %[[VAL_3:.*]] = tensor.dim %[[VAL_0]], %[[VAL_1]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG: %[[VAL_4:.*]] = tensor.dim %[[VAL_0]], %[[VAL_1]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG: %[[VAL_5:.*]] = bufferization.alloc_tensor(%[[VAL_3]], %[[VAL_4]]) : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_1]]] : memref<?xindex>
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_2]]] : memref<?xindex>
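
The attribute rename from `dimension` to `level` is not purely cosmetic: levels index the storage order, which can differ from the dimension order when the encoding permutes dimensions. A hypothetical column-major (CSC-style) encoding illustrates the distinction; here level 0 of the storage walks dimension 1 of the tensor:

#CSC = #sparse_tensor.encoding<{
  dimLevelType = [ "dense", "compressed" ],
  dimOrdering = affine_map<(i, j) -> (j, i)>
}>
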
44 changes: 22 additions & 22 deletions mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir
@@ -30,8 +30,8 @@
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant true
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi64>
@@ -91,8 +91,8 @@ func.func @add(%arga: tensor<32xi64, #SV>,
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant true
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 0 : i64
// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi64>
@@ -151,8 +151,8 @@ func.func @sub(%arga: tensor<32xi64, #SV>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<32xi64>) -> tensor<32xi64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi64>
@@ -187,8 +187,8 @@ func.func @mul(%arga: tensor<32xi64, #SV>,
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : i64
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
@@ -221,8 +221,8 @@ func.func @divsbyc(%arga: tensor<32xi64, #SV>,
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : i64
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
@@ -255,8 +255,8 @@ func.func @divubyc(%arga: tensor<32xi64, #SV>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<32xi64>) -> tensor<32xi64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xi64>
// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi64>
@@ -293,8 +293,8 @@ func.func @and(%arga: tensor<32xi64, #SV>,
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant true
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xi64>
// CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi64>
@@ -353,8 +353,8 @@ func.func @or(%arga: tensor<32xi64, #SV>,
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant true
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xi64>
// CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi64>
@@ -411,8 +411,8 @@ func.func @xor(%arga: tensor<32xi64, #SV>,
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : i64
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xi64>
// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
@@ -445,8 +445,8 @@ func.func @ashrbyc(%arga: tensor<32xi64, #SV>,
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : i64
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xi64>
// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
@@ -479,8 +479,8 @@ func.func @lsrbyc(%arga: tensor<32xi64, #SV>,
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : i64
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xi64>
// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>