
Commit 91470d6

Revert "[mlir][sparse] Expose SparseTensor passes as enums instead of opaque"

This reverts commit ef25b5d.

Parent: ef25b5d
27 files changed: +101 -118 lines

mlir/benchmark/python/common.py

Lines changed: 2 additions & 2 deletions
@@ -13,8 +13,8 @@ def setup_passes(mlir_module):
     """Setup pass pipeline parameters for benchmark functions.
     """
     opt = (
-        "parallelization-strategy=none"
-        " vectorization-strategy=none vl=1 enable-simd-index32=False"
+        "parallelization-strategy=0"
+        " vectorization-strategy=0 vl=1 enable-simd-index32=False"
     )
     pipeline = f"sparse-compiler{{{opt}}}"
     PassManager.parse(pipeline).run(mlir_module)
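The same textual pipeline can be exercised from C++. The sketch below is an illustration, not code from this commit; it assumes the `sparse-compiler` pipeline has been registered through mlir::sparse_tensor::registerSparseTensorPipelines() and that mlir::parsePassPipeline is used to resolve the pipeline string, mirroring what the Python benchmark does through PassManager.parse.

#include "mlir/Dialect/SparseTensor/Pipelines/Passes.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"

// Parse the textual sparse-compiler pipeline with the numeric strategy
// flags restored by this revert (0 selects the kNone strategies).
static mlir::LogicalResult buildPipeline(mlir::PassManager &pm) {
  mlir::sparse_tensor::registerSparseTensorPipelines();
  return mlir::parsePassPipeline(
      "sparse-compiler{parallelization-strategy=0"
      " vectorization-strategy=0 vl=1 enable-simd-index32=False}",
      pm);
}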

mlir/include/mlir/Dialect/SparseTensor/Pipelines/Passes.h

Lines changed: 9 additions & 38 deletions
@@ -30,43 +30,12 @@ namespace sparse_tensor {
 struct SparseCompilerOptions
     : public PassPipelineOptions<SparseCompilerOptions> {
   // These options must be kept in sync with `SparsificationBase`.
-  // TODO(57514): These options are duplicated in Passes.td.
-  PassOptions::Option<mlir::SparseParallelizationStrategy> parallelization{
+  PassOptions::Option<int32_t> parallelization{
       *this, "parallelization-strategy",
-      ::llvm::cl::desc("Set the parallelization strategy"),
-      ::llvm::cl::init(mlir::SparseParallelizationStrategy::kNone),
-      llvm::cl::values(
-          clEnumValN(mlir::SparseParallelizationStrategy::kNone, "none",
-                     "Turn off sparse parallelization."),
-          clEnumValN(mlir::SparseParallelizationStrategy::kDenseOuterLoop,
-                     "dense-outer-loop",
-                     "Enable dense outer loop sparse parallelization."),
-          clEnumValN(mlir::SparseParallelizationStrategy::kAnyStorageOuterLoop,
-                     "any-storage-outer-loop",
-                     "Enable sparse parallelization regardless of storage for "
-                     "the outer loop."),
-          clEnumValN(mlir::SparseParallelizationStrategy::kDenseAnyLoop,
-                     "dense-any-loop",
-                     "Enable dense parallelization for any loop."),
-          clEnumValN(
-              mlir::SparseParallelizationStrategy::kAnyStorageAnyLoop,
-              "any-storage-any-loop",
-              "Enable sparse parallelization for any storage and loop."))};
-  PassOptions::Option<mlir::SparseVectorizationStrategy> vectorization{
-      *this, "vectorization-strategy",
-      ::llvm::cl::desc("Set the vectorization strategy"),
-      ::llvm::cl::init(mlir::SparseVectorizationStrategy::kNone),
-      llvm::cl::values(
-          clEnumValN(mlir::SparseVectorizationStrategy::kNone, "none",
-                     "Turn off sparse vectorization."),
-          clEnumValN(mlir::SparseVectorizationStrategy::kDenseInnerLoop,
-                     "dense-inner-loop",
-                     "Enable vectorization for dense inner loops."),
-          clEnumValN(mlir::SparseVectorizationStrategy::kAnyStorageInnerLoop,
-                     "any-storage-inner-loop",
-                     "Enable sparse vectorization for inner loops with any "
-                     "storage."))};
-
+      desc("Set the parallelization strategy"), init(0)};
+  PassOptions::Option<int32_t> vectorization{
+      *this, "vectorization-strategy", desc("Set the vectorization strategy"),
+      init(0)};
   PassOptions::Option<int32_t> vectorLength{
       *this, "vl", desc("Set the vector length"), init(1)};
   PassOptions::Option<bool> enableSIMDIndex32{
@@ -81,8 +50,10 @@ struct SparseCompilerOptions

   /// Projects out the options for `createSparsificationPass`.
   SparsificationOptions sparsificationOptions() const {
-    return SparsificationOptions(parallelization, vectorization, vectorLength,
-                                 enableSIMDIndex32, enableVLAVectorization);
+    return SparsificationOptions(sparseParallelizationStrategy(parallelization),
+                                 sparseVectorizationStrategy(vectorization),
+                                 vectorLength, enableSIMDIndex32,
+                                 enableVLAVectorization);
   }

   // These options must be kept in sync with `SparseTensorConversionBase`.

mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h

Lines changed: 6 additions & 0 deletions
@@ -49,6 +49,9 @@ enum class SparseParallelizationStrategy {
   // TODO: support reduction parallelization too?
 };

+/// Converts command-line parallelization flag to the strategy enum.
+SparseParallelizationStrategy sparseParallelizationStrategy(int32_t flag);
+
 /// Defines a vectorization strategy. Any inner loop is a candidate (full SIMD
 /// for parallel loops and horizontal SIMD for reduction loops). A loop is
 /// actually vectorized if (1) allowed by the strategy, and (2) the emitted
@@ -59,6 +62,9 @@ enum class SparseVectorizationStrategy {
   kAnyStorageInnerLoop
 };

+/// Converts command-line vectorization flag to the strategy enum.
+SparseVectorizationStrategy sparseVectorizationStrategy(int32_t flag);
+
 /// Options for the Sparsification pass.
 struct SparsificationOptions {
   SparsificationOptions(SparseParallelizationStrategy p,
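The two helpers declared above are the bridge between the raw integer pass options and the strategy enums. As a rough sketch of the intended call pattern (not part of this commit; it assumes the SparseTensor transform headers and the five-argument SparsificationOptions constructor used elsewhere in this diff, with hypothetical flag values for illustration):

#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"

using namespace mlir;

// Map raw command-line flags onto strategy enums before building the
// sparsification options; any out-of-range flag falls back to kNone.
static SparsificationOptions optionsFromFlags(int32_t par, int32_t vec) {
  return SparsificationOptions(sparseParallelizationStrategy(par), // 4 -> kAnyStorageAnyLoop
                               sparseVectorizationStrategy(vec),   // 2 -> kAnyStorageInnerLoop
                               /*vectorLength=*/16,
                               /*enableSIMDIndex32=*/false,
                               /*enableVLAVectorization=*/false);
}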

mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td

Lines changed: 4 additions & 29 deletions
@@ -62,36 +62,11 @@ def SparsificationPass : Pass<"sparsification", "ModuleOp"> {
     "sparse_tensor::SparseTensorDialect",
     "vector::VectorDialect",
   ];
-  // TODO(57514): These enum options are duplicated in Passes.h.
   let options = [
-    Option<"parallelization", "parallelization-strategy", "mlir::SparseParallelizationStrategy",
-           "mlir::SparseParallelizationStrategy::kNone",
-           "Set the parallelization strategy", [{llvm::cl::values(
-             clEnumValN(mlir::SparseParallelizationStrategy::kNone, "none",
-                        "Turn off sparse parallelization."),
-             clEnumValN(mlir::SparseParallelizationStrategy::kDenseOuterLoop,
-                        "dense-outer-loop",
-                        "Enable dense outer loop sparse parallelization."),
-             clEnumValN(mlir::SparseParallelizationStrategy::kAnyStorageOuterLoop,
-                        "any-storage-outer-loop",
-                        "Enable sparse parallelization regardless of storage for the outer loop."),
-             clEnumValN(mlir::SparseParallelizationStrategy::kDenseAnyLoop,
-                        "dense-any-loop",
-                        "Enable dense parallelization for any loop."),
-             clEnumValN(mlir::SparseParallelizationStrategy::kAnyStorageAnyLoop,
-                        "any-storage-any-loop",
-                        "Enable sparse parallelization for any storage and loop."))}]>,
-    Option<"vectorization", "vectorization-strategy", "mlir::SparseVectorizationStrategy",
-           "mlir::SparseVectorizationStrategy::kNone",
-           "Set the vectorization strategy", [{llvm::cl::values(
-             clEnumValN(mlir::SparseVectorizationStrategy::kNone, "none",
-                        "Turn off sparse vectorization."),
-             clEnumValN(mlir::SparseVectorizationStrategy::kDenseInnerLoop,
-                        "dense-inner-loop",
-                        "Enable vectorization for dense inner loops."),
-             clEnumValN(mlir::SparseVectorizationStrategy::kAnyStorageInnerLoop,
-                        "any-storage-inner-loop",
-                        "Enable sparse vectorization for inner loops with any storage."))}]>,
+    Option<"parallelization", "parallelization-strategy", "int32_t", "0",
+           "Set the parallelization strategy">,
+    Option<"vectorization", "vectorization-strategy", "int32_t", "0",
+           "Set the vectorization strategy">,
     Option<"vectorLength", "vl", "int32_t", "1",
            "Set the vector length">,
     Option<"enableSIMDIndex32", "enable-simd-index32", "bool", "false",

mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp

Lines changed: 33 additions & 4 deletions
@@ -43,8 +43,8 @@ struct SparsificationPass
   SparsificationPass() = default;
   SparsificationPass(const SparsificationPass &pass) = default;
   SparsificationPass(const SparsificationOptions &options) {
-    parallelization = options.parallelizationStrategy;
-    vectorization = options.vectorizationStrategy;
+    parallelization = static_cast<int32_t>(options.parallelizationStrategy);
+    vectorization = static_cast<int32_t>(options.vectorizationStrategy);
     vectorLength = options.vectorLength;
     enableSIMDIndex32 = options.enableSIMDIndex32;
     enableVLAVectorization = options.enableVLAVectorization;
@@ -57,8 +57,10 @@ struct SparsificationPass
     populateSparseTensorRewriting(prePatterns);
     (void)applyPatternsAndFoldGreedily(getOperation(), std::move(prePatterns));
     // Translate strategy flags to strategy options.
-    SparsificationOptions options(parallelization, vectorization, vectorLength,
-                                  enableSIMDIndex32, enableVLAVectorization);
+    SparsificationOptions options(
+        sparseParallelizationStrategy(parallelization),
+        sparseVectorizationStrategy(vectorization), vectorLength,
+        enableSIMDIndex32, enableVLAVectorization);
     // Apply sparsification and vector cleanup rewriting.
     RewritePatternSet patterns(ctx);
     populateSparsificationPatterns(patterns, options);
@@ -236,6 +238,33 @@ struct SparseTensorStorageExpansionPass
 // Strategy flag methods.
 //===----------------------------------------------------------------------===//

+SparseParallelizationStrategy
+mlir::sparseParallelizationStrategy(int32_t flag) {
+  switch (flag) {
+  default:
+    return SparseParallelizationStrategy::kNone;
+  case 1:
+    return SparseParallelizationStrategy::kDenseOuterLoop;
+  case 2:
+    return SparseParallelizationStrategy::kAnyStorageOuterLoop;
+  case 3:
+    return SparseParallelizationStrategy::kDenseAnyLoop;
+  case 4:
+    return SparseParallelizationStrategy::kAnyStorageAnyLoop;
+  }
+}
+
+SparseVectorizationStrategy mlir::sparseVectorizationStrategy(int32_t flag) {
+  switch (flag) {
+  default:
+    return SparseVectorizationStrategy::kNone;
+  case 1:
+    return SparseVectorizationStrategy::kDenseInnerLoop;
+  case 2:
+    return SparseVectorizationStrategy::kAnyStorageInnerLoop;
+  }
+}
+
 SparseToSparseConversionStrategy
 mlir::sparseToSparseConversionStrategy(int32_t flag) {
   switch (flag) {
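For context, here is a minimal sketch of how these conversions end up being used when assembling a pipeline programmatically. It is an illustration, not code from this commit; it assumes the createSparsificationPass(const SparsificationOptions &) factory that sparsificationOptions() above projects into, the standard PassManager API, and flag values chosen for demonstration.

#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Pass/PassManager.h"

// Configure sparsification from the numeric flags used throughout this
// commit: parallelization-strategy=4, vectorization-strategy=2, vl=16.
static void addSparsification(mlir::PassManager &pm) {
  mlir::SparsificationOptions options(
      mlir::sparseParallelizationStrategy(/*flag=*/4), // kAnyStorageAnyLoop
      mlir::sparseVectorizationStrategy(/*flag=*/2),   // kAnyStorageInnerLoop
      /*vectorLength=*/16,
      /*enableSIMDIndex32=*/false,
      /*enableVLAVectorization=*/false);
  pm.addPass(mlir::createSparsificationPass(options));
}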

mlir/test/Dialect/SparseTensor/sparse_parallel.mlir

Lines changed: 5 additions & 5 deletions
@@ -1,12 +1,12 @@
-// RUN: mlir-opt %s -sparsification="parallelization-strategy=none" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=0" | \
 // RUN: FileCheck %s --check-prefix=CHECK-PAR0
-// RUN: mlir-opt %s -sparsification="parallelization-strategy=dense-outer-loop" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=1" | \
 // RUN: FileCheck %s --check-prefix=CHECK-PAR1
-// RUN: mlir-opt %s -sparsification="parallelization-strategy=any-storage-outer-loop" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=2" | \
 // RUN: FileCheck %s --check-prefix=CHECK-PAR2
-// RUN: mlir-opt %s -sparsification="parallelization-strategy=dense-any-loop" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=3" | \
 // RUN: FileCheck %s --check-prefix=CHECK-PAR3
-// RUN: mlir-opt %s -sparsification="parallelization-strategy=any-storage-any-loop" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=4" | \
 // RUN: FileCheck %s --check-prefix=CHECK-PAR4

 #DenseMatrix = #sparse_tensor.encoding<{

mlir/test/Dialect/SparseTensor/sparse_vector.mlir

Lines changed: 5 additions & 5 deletions
@@ -1,12 +1,12 @@
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=none vl=16" -cse -split-input-file | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=0 vl=16" -cse -split-input-file | \
 // RUN: FileCheck %s --check-prefix=CHECK-VEC0
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=dense-inner-loop vl=16" -cse -split-input-file | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=1 vl=16" -cse -split-input-file | \
 // RUN: FileCheck %s --check-prefix=CHECK-VEC1
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=16" -cse -split-input-file | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=16" -cse -split-input-file | \
 // RUN: FileCheck %s --check-prefix=CHECK-VEC2
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=16 enable-simd-index32=true" -cse -split-input-file | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=16 enable-simd-index32=true" -cse -split-input-file | \
 // RUN: FileCheck %s --check-prefix=CHECK-VEC3
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=4 enable-vla-vectorization=true" -cse -split-input-file | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=4 enable-vla-vectorization=true" -cse -split-input-file | \
 // RUN: FileCheck %s --check-prefix=CHECK-VEC4

 #DenseVector = #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>

mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 // NOTE: Assertions have been autogenerated by utils/generate-test-checks.py

-// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=8" -canonicalize | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=8" -canonicalize | \
 // RUN: FileCheck %s

 #SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["dense","compressed"]}>

mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
 // about what constitutes a good test! The CHECK should be
 // minimized and named to reflect the test intent.

-// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=8" -canonicalize | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=8" -canonicalize | \
 // RUN: FileCheck %s

 #SparseVector = #sparse_tensor.encoding<{

mlir/test/Dialect/SparseTensor/sparse_vector_peeled.mlir

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=16" -scf-for-loop-peeling -canonicalize | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=16" -scf-for-loop-peeling -canonicalize | \
 // RUN: FileCheck %s

 #SparseVector = #sparse_tensor.encoding<{
