Skip to content

Commit

Permalink
[mlir][sparse] Use the runtime DimLevelType instead of a separate tablegen enum
Browse files Browse the repository at this point in the history

This differential replaces all uses of SparseTensorEncodingAttr::DimLevelType with DimLevelType. The next differential will break out a separate library for the DimLevelType enum, so that the Dialect code doesn't need to depend on the rest of the runtime.

Depends On D135995

Reviewed By: aartbik

Differential Revision: https://reviews.llvm.org/D135996
  • Loading branch information
wrengr committed Oct 18, 2022
1 parent 4481057 commit 0e77b63
Show file tree
Hide file tree
Showing 11 changed files with 92 additions and 231 deletions.
53 changes: 34 additions & 19 deletions mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
#ifndef MLIR_DIALECT_SPARSETENSOR_IR_SPARSETENSOR_H_
#define MLIR_DIALECT_SPARSETENSOR_IR_SPARSETENSOR_H_

#include "mlir/ExecutionEngine/SparseTensor/Enums.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/OpDefinition.h"
Expand Down Expand Up @@ -36,33 +37,47 @@ SparseTensorEncodingAttr getSparseTensorEncoding(Type type);
// Dimension level types.
//

bool isDenseDim(SparseTensorEncodingAttr::DimLevelType dltp);
bool isCompressedDim(SparseTensorEncodingAttr::DimLevelType dltp);
bool isSingletonDim(SparseTensorEncodingAttr::DimLevelType dltp);

/// Convenience method to test for dense dimension (0 <= d < rank).
bool isDenseDim(RankedTensorType type, uint64_t d);

/// Convenience method to test for compressed dimension (0 <= d < rank).
bool isCompressedDim(RankedTensorType type, uint64_t d);

/// Convenience method to test for singleton dimension (0 <= d < rank).
bool isSingletonDim(RankedTensorType type, uint64_t d);
// Cannot be constexpr, because `getRank` isn't constexpr. However,
// for some strange reason, the wrapper functions below don't trigger
// the same [-Winvalid-constexpr] warning (despite this function not
// being constexpr).
inline DimLevelType getDimLevelType(RankedTensorType type, uint64_t d) {
  assert(d < static_cast<uint64_t>(type.getRank()));
  const auto enc = getSparseTensorEncoding(type);
  // A tensor without a sparse encoding attribute is implicitly all-dense.
  if (!enc)
    return DimLevelType::Dense;
  return enc.getDimLevelType()[d];
}

/// Convenience function to test for dense dimension (0 <= d < rank).
constexpr bool isDenseDim(RankedTensorType type, uint64_t d) {
  // Look up the level type for dimension `d` and test the Dense predicate.
  const DimLevelType dlt = getDimLevelType(type, d);
  return isDenseDLT(dlt);
}

/// Convenience function to test for compressed dimension (0 <= d < rank).
constexpr bool isCompressedDim(RankedTensorType type, uint64_t d) {
  // Look up the level type for dimension `d` and test the Compressed predicate.
  const DimLevelType dlt = getDimLevelType(type, d);
  return isCompressedDLT(dlt);
}

/// Convenience function to test for singleton dimension (0 <= d < rank).
constexpr bool isSingletonDim(RankedTensorType type, uint64_t d) {
  // Look up the level type for dimension `d` and test the Singleton predicate.
  const DimLevelType dlt = getDimLevelType(type, d);
  return isSingletonDLT(dlt);
}

//
// Dimension level properties.
//

bool isOrderedDim(SparseTensorEncodingAttr::DimLevelType dltp);
bool isUniqueDim(SparseTensorEncodingAttr::DimLevelType dltp);

/// Convenience method to test for ordered property in the
/// Convenience function to test for ordered property in the
/// given dimension (0 <= d < rank).
bool isOrderedDim(RankedTensorType type, uint64_t d);
constexpr bool isOrderedDim(RankedTensorType type, uint64_t d) {
  // Look up the level type for dimension `d` and test the ordered property.
  const DimLevelType dlt = getDimLevelType(type, d);
  return isOrderedDLT(dlt);
}

/// Convenience method to test for unique property in the
/// Convenience function to test for unique property in the
/// given dimension (0 <= d < rank).
bool isUniqueDim(RankedTensorType type, uint64_t d);
constexpr bool isUniqueDim(RankedTensorType type, uint64_t d) {
  // Look up the level type for dimension `d` and test the unique property.
  const DimLevelType dlt = getDimLevelType(type, d);
  return isUniqueDLT(dlt);
}

//
// Reordering.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -145,7 +145,7 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
ins
// A dimension level type for each dimension of the tensor type.
ArrayRefParameter<
"SparseTensorEncodingAttr::DimLevelType",
"::mlir::sparse_tensor::DimLevelType",
"per dimension level type"
>: $dimLevelType,
// A dimension order on the indices of this tensor type.
Expand All @@ -160,26 +160,6 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",

let genVerifyDecl = 1;
let hasCustomAssemblyFormat = 1;

let extraClassDeclaration = [{
  // Dimension level types. By default, each type has the unique and
  // ordered properties. Alternative properties are indicated by
  // Nu (not-unique) and No (not-ordered).
  //
  // NOTE(review): the numeric values appear to be ABI-pinned — they must
  // match the C-API's MlirSparseTensorDimLevelType constants (enforced by
  // the static_assert in mlir/lib/CAPI/Dialect/SparseTensor.cpp). Per the
  // binary comments below, bit 0 encodes Nu and bit 1 encodes No, while
  // the higher bits select the storage format.
  //
  // TODO: separate type and property in encoding
  //
  enum class DimLevelType : uint8_t {
    Dense = 4,           // 0b001_00
    Compressed = 8,      // 0b010_00
    CompressedNu = 9,    // 0b010_01
    CompressedNo = 10,   // 0b010_10
    CompressedNuNo = 11, // 0b010_11
    Singleton = 16,      // 0b100_00
    SingletonNu = 17,    // 0b100_01
    SingletonNo = 18,    // 0b100_10
    SingletonNuNo = 19,  // 0b100_11
  };
}];
}

def IsSparseTensorPred
Expand Down
31 changes: 11 additions & 20 deletions mlir/lib/CAPI/Dialect/SparseTensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,31 +22,23 @@ MLIR_DEFINE_CAPI_DIALECT_REGISTRATION(SparseTensor, sparse_tensor,
// Ensure the C-API enums are int-castable to C++ equivalents.
static_assert(
static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_DENSE) ==
static_cast<int>(SparseTensorEncodingAttr::DimLevelType::Dense) &&
static_cast<int>(DimLevelType::Dense) &&
static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED) ==
static_cast<int>(
SparseTensorEncodingAttr::DimLevelType::Compressed) &&
static_cast<int>(DimLevelType::Compressed) &&
static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU) ==
static_cast<int>(
SparseTensorEncodingAttr::DimLevelType::CompressedNu) &&
static_cast<int>(DimLevelType::CompressedNu) &&
static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NO) ==
static_cast<int>(
SparseTensorEncodingAttr::DimLevelType::CompressedNo) &&
static_cast<int>(DimLevelType::CompressedNo) &&
static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU_NO) ==
static_cast<int>(
SparseTensorEncodingAttr::DimLevelType::CompressedNuNo) &&
static_cast<int>(DimLevelType::CompressedNuNo) &&
static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON) ==
static_cast<int>(
SparseTensorEncodingAttr::DimLevelType::Singleton) &&
static_cast<int>(DimLevelType::Singleton) &&
static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU) ==
static_cast<int>(
SparseTensorEncodingAttr::DimLevelType::SingletonNu) &&
static_cast<int>(DimLevelType::SingletonNu) &&
static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NO) ==
static_cast<int>(
SparseTensorEncodingAttr::DimLevelType::SingletonNo) &&
static_cast<int>(DimLevelType::SingletonNo) &&
static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU_NO) ==
static_cast<int>(
SparseTensorEncodingAttr::DimLevelType::SingletonNuNo),
static_cast<int>(DimLevelType::SingletonNuNo),
"MlirSparseTensorDimLevelType (C-API) and DimLevelType (C++) mismatch");

bool mlirAttributeIsASparseTensorEncodingAttr(MlirAttribute attr) {
Expand All @@ -58,11 +50,10 @@ MlirAttribute mlirSparseTensorEncodingAttrGet(
MlirSparseTensorDimLevelType const *dimLevelTypes,
MlirAffineMap dimOrdering, MlirAffineMap higherOrdering,
int pointerBitWidth, int indexBitWidth) {
SmallVector<SparseTensorEncodingAttr::DimLevelType> cppDimLevelTypes;
SmallVector<DimLevelType> cppDimLevelTypes;
cppDimLevelTypes.resize(numDimLevelTypes);
for (intptr_t i = 0; i < numDimLevelTypes; ++i)
cppDimLevelTypes[i] =
static_cast<SparseTensorEncodingAttr::DimLevelType>(dimLevelTypes[i]);
cppDimLevelTypes[i] = static_cast<DimLevelType>(dimLevelTypes[i]);
return wrap(SparseTensorEncodingAttr::get(
unwrap(ctx), cppDimLevelTypes, unwrap(dimOrdering),
unwrap(higherOrdering), pointerBitWidth, indexBitWidth));
Expand Down
1 change: 1 addition & 0 deletions mlir/lib/Dialect/SparseTensor/IR/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -14,4 +14,5 @@ add_mlir_dialect_library(MLIRSparseTensorDialect
MLIRIR
MLIRInferTypeOpInterface
MLIRSupport
MLIRSparseTensorRuntime
)
112 changes: 10 additions & 102 deletions mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {
if (failed(parser.parseGreater()))
return {};
// Process the data from the parsed dictionary value into struct-like data.
SmallVector<SparseTensorEncodingAttr::DimLevelType, 4> dlt;
SmallVector<DimLevelType, 4> dlt;
AffineMap dimOrd = {};
AffineMap higherOrd = {};
unsigned ptr = 0;
Expand All @@ -71,23 +71,23 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {
}
auto strVal = strAttr.getValue();
if (strVal == "dense") {
dlt.push_back(SparseTensorEncodingAttr::DimLevelType::Dense);
dlt.push_back(DimLevelType::Dense);
} else if (strVal == "compressed") {
dlt.push_back(SparseTensorEncodingAttr::DimLevelType::Compressed);
dlt.push_back(DimLevelType::Compressed);
} else if (strVal == "compressed-nu") {
dlt.push_back(SparseTensorEncodingAttr::DimLevelType::CompressedNu);
dlt.push_back(DimLevelType::CompressedNu);
} else if (strVal == "compressed-no") {
dlt.push_back(SparseTensorEncodingAttr::DimLevelType::CompressedNo);
dlt.push_back(DimLevelType::CompressedNo);
} else if (strVal == "compressed-nu-no") {
dlt.push_back(SparseTensorEncodingAttr::DimLevelType::CompressedNuNo);
dlt.push_back(DimLevelType::CompressedNuNo);
} else if (strVal == "singleton") {
dlt.push_back(SparseTensorEncodingAttr::DimLevelType::Singleton);
dlt.push_back(DimLevelType::Singleton);
} else if (strVal == "singleton-nu") {
dlt.push_back(SparseTensorEncodingAttr::DimLevelType::SingletonNu);
dlt.push_back(DimLevelType::SingletonNu);
} else if (strVal == "singleton-no") {
dlt.push_back(SparseTensorEncodingAttr::DimLevelType::SingletonNo);
dlt.push_back(DimLevelType::SingletonNo);
} else if (strVal == "singleton-nu-no") {
dlt.push_back(SparseTensorEncodingAttr::DimLevelType::SingletonNuNo);
dlt.push_back(DimLevelType::SingletonNuNo);
} else {
parser.emitError(parser.getNameLoc(),
"unexpected dimension level type: ")
Expand Down Expand Up @@ -258,98 +258,6 @@ mlir::sparse_tensor::getSparseTensorEncoding(Type type) {
return nullptr;
}

bool mlir::sparse_tensor::isDenseDim(
    SparseTensorEncodingAttr::DimLevelType dltp) {
  // Dense has no Nu/No variants, so a single equality check suffices.
  using DLT = SparseTensorEncodingAttr::DimLevelType;
  return DLT::Dense == dltp;
}

bool mlir::sparse_tensor::isCompressedDim(
    SparseTensorEncodingAttr::DimLevelType dltp) {
  // True for every compressed variant, regardless of the Nu/No properties.
  using DLT = SparseTensorEncodingAttr::DimLevelType;
  return dltp == DLT::Compressed || dltp == DLT::CompressedNu ||
         dltp == DLT::CompressedNo || dltp == DLT::CompressedNuNo;
}

bool mlir::sparse_tensor::isSingletonDim(
    SparseTensorEncodingAttr::DimLevelType dltp) {
  // True for every singleton variant, regardless of the Nu/No properties.
  using DLT = SparseTensorEncodingAttr::DimLevelType;
  return dltp == DLT::Singleton || dltp == DLT::SingletonNu ||
         dltp == DLT::SingletonNo || dltp == DLT::SingletonNuNo;
}

bool mlir::sparse_tensor::isDenseDim(RankedTensorType type, uint64_t d) {
  assert(d < static_cast<uint64_t>(type.getRank()));
  const auto enc = getSparseTensorEncoding(type);
  // An unannotated tensor is treated as all-dense.
  if (!enc)
    return true;
  return isDenseDim(enc.getDimLevelType()[d]);
}

bool mlir::sparse_tensor::isCompressedDim(RankedTensorType type, uint64_t d) {
  assert(d < static_cast<uint64_t>(type.getRank()));
  const auto enc = getSparseTensorEncoding(type);
  // An unannotated tensor is all-dense, hence never compressed.
  if (!enc)
    return false;
  return isCompressedDim(enc.getDimLevelType()[d]);
}

bool mlir::sparse_tensor::isSingletonDim(RankedTensorType type, uint64_t d) {
  assert(d < static_cast<uint64_t>(type.getRank()));
  const auto enc = getSparseTensorEncoding(type);
  // An unannotated tensor is all-dense, hence never singleton.
  if (!enc)
    return false;
  return isSingletonDim(enc.getDimLevelType()[d]);
}

bool mlir::sparse_tensor::isOrderedDim(
    SparseTensorEncodingAttr::DimLevelType dltp) {
  // Only the "No" (not-ordered) variants lack the ordered property.
  using DLT = SparseTensorEncodingAttr::DimLevelType;
  return !(dltp == DLT::CompressedNo || dltp == DLT::CompressedNuNo ||
           dltp == DLT::SingletonNo || dltp == DLT::SingletonNuNo);
}

bool mlir::sparse_tensor::isUniqueDim(
    SparseTensorEncodingAttr::DimLevelType dltp) {
  // Only the "Nu" (not-unique) variants lack the unique property.
  using DLT = SparseTensorEncodingAttr::DimLevelType;
  return !(dltp == DLT::CompressedNu || dltp == DLT::CompressedNuNo ||
           dltp == DLT::SingletonNu || dltp == DLT::SingletonNuNo);
}

bool mlir::sparse_tensor::isOrderedDim(RankedTensorType type, uint64_t d) {
  assert(d < static_cast<uint64_t>(type.getRank()));
  const auto enc = getSparseTensorEncoding(type);
  // An unannotated tensor is all-dense, and dense implies ordered.
  if (!enc)
    return true;
  return isOrderedDim(enc.getDimLevelType()[d]);
}

bool mlir::sparse_tensor::isUniqueDim(RankedTensorType type, uint64_t d) {
  assert(d < static_cast<uint64_t>(type.getRank()));
  const auto enc = getSparseTensorEncoding(type);
  // An unannotated tensor is all-dense, and dense implies unique.
  if (!enc)
    return true;
  return isUniqueDim(enc.getDimLevelType()[d]);
}

uint64_t mlir::sparse_tensor::toOrigDim(const SparseTensorEncodingAttr &enc,
uint64_t d) {
if (enc) {
Expand Down
Loading

0 comments on commit 0e77b63

Please sign in to comment.