diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
index 675c153477919..b27cc1eff934f 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -204,8 +204,6 @@ enum class LevelFormat : uint8_t {
 enum class LevelNondefaultProperty : uint8_t {
   Nonunique = 1,  // 0b00000_01
   Nonordered = 2, // 0b00000_10
-  High = 32,      // 0b01000_00
-  Block2_4 = 64   // 0b10000_00
 };
 
 /// Returns string representation of the given dimension level type.
diff --git a/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp b/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp
index b8483f5db130d..a4985fc9e8a8b 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp
@@ -63,13 +63,11 @@ FailureOr<uint64_t> LvlTypeParser::parseLvlType(AsmParser &parser) const {
   if (base.compare("dense") == 0) {
     properties |= static_cast<uint64_t>(LevelFormat::Dense);
   } else if (base.compare("compressed") == 0) {
-    // TODO: Remove this condition once dimLvlType enum is refactored. Current
-    // enum treats High and TwoOutOfFour as formats instead of properties.
-    if (!(properties & static_cast<uint64_t>(LevelNondefaultProperty::High) ||
-          properties &
-              static_cast<uint64_t>(LevelNondefaultProperty::Block2_4))) {
-      properties |= static_cast<uint64_t>(LevelFormat::Compressed);
-    }
+    properties |= static_cast<uint64_t>(LevelFormat::Compressed);
+  } else if (base.compare("block2_4") == 0) {
+    properties |= static_cast<uint64_t>(LevelFormat::TwoOutOfFour);
+  } else if (base.compare("loose_compressed") == 0) {
+    properties |= static_cast<uint64_t>(LevelFormat::CompressedWithHi);
   } else if (base.compare("singleton") == 0) {
     properties |= static_cast<uint64_t>(LevelFormat::Singleton);
   } else {
@@ -90,10 +88,6 @@ ParseResult LvlTypeParser::parseProperty(AsmParser &parser,
     *properties |= static_cast<uint64_t>(LevelNondefaultProperty::Nonunique);
   } else if (strVal.compare("nonordered") == 0) {
     *properties |= static_cast<uint64_t>(LevelNondefaultProperty::Nonordered);
-  } else if (strVal.compare("high") == 0) {
-    *properties |= static_cast<uint64_t>(LevelNondefaultProperty::High);
-  } else if (strVal.compare("block2_4") == 0) {
-    *properties |= static_cast<uint64_t>(LevelNondefaultProperty::Block2_4);
   } else {
     parser.emitError(parser.getCurrentLocation(), "unknown level property");
     return failure();
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
index 60367b43a6ee0..0e77889242925 100644
--- a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
@@ -65,7 +65,7 @@ func.func private @sparse_coo(tensor)
 // -----
 
 #BCOO = #sparse_tensor.encoding<{
-  map = (d0, d1, d2) -> (d0 : dense, d1 : compressed(nonunique, high), d2 : singleton)
+  map = (d0, d1, d2) -> (d0 : dense, d1 : loose_compressed(nonunique), d2 : singleton)
 }>
 
 // CHECK-LABEL: func private @sparse_bcoo(
@@ -148,7 +148,7 @@ func.func private @sparse_slice(tensor)
 // below) to encode a 2D matrix, but it would require dim2lvl mapping which is not ready yet.
 // So we take the simple path for now.
 #NV_24= #sparse_tensor.encoding<{
-  map = (d0, d1) -> (d0 : dense, d1 : compressed(block2_4))
+  map = (d0, d1) -> (d0 : dense, d1 : block2_4)
 }>
 
 // CHECK-LABEL: func private @sparse_2_out_of_4(
@@ -199,7 +199,7 @@ func.func private @BCSR_explicit(%arg0: tensor) {
   map = ( i, j ) ->
   ( i : dense,
     j floordiv 4 : dense,
-    j mod 4 : compressed(block2_4)
+    j mod 4 : block2_4
   )
 }>
 
diff --git a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
index 56f966e903912..9ba47bdf6d108 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
@@ -1050,7 +1050,7 @@ func.func @cmp_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #T
 }
 
 #BatchedVector = #sparse_tensor.encoding<{
-  map = (d0, d1) -> (d0 : dense, d1 : compressed(high))
+  map = (d0, d1) -> (d0 : dense, d1 : loose_compressed)
 }>
 // CHECK-LABEL: func.func @sub_ss_batched(
 // CHECK-SAME: %[[VAL_0:.*]]: tensor<2x3xf64, #{{.*}}>>,
diff --git a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
index 822cfb0148f24..d05d3d5a49cfa 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
@@ -141,7 +141,7 @@ func.func @foreach_print_slice(%A: tensor<4x4xf64, #CSR_SLICE>) {
 }
 
 #BCOO = #sparse_tensor.encoding<{
-  map = (d0, d1, d2) -> (d0 : dense, d1 : compressed(nonunique, high), d2 : singleton)
+  map = (d0, d1, d2) -> (d0 : dense, d1 : loose_compressed(nonunique), d2 : singleton)
 }>
 
 // CHECK-LABEL: func.func @foreach_bcoo(
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
index c464d01bf2ab3..a34d62e136947 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
@@ -43,7 +43,7 @@
 }>
 
 #BCOO = #sparse_tensor.encoding<{
-  map = (d0, d1, d2) -> (d0 : dense, d1 : compressed(nonunique, high), d2 : singleton)
+  map = (d0, d1, d2) -> (d0 : dense, d1 : loose_compressed(nonunique), d2 : singleton)
 }>

module {