
Commit d59cf90

[mlir][sparse] Expose SparseTensor passes as enums instead of opaque numbers for vectorization and parallelization options.
The SparseTensor passes currently use opaque numbers for the CLI, despite using an enum internally. This patch exposes the enum values directly instead of numbered options that are mapped back to the enum.

Fixes GitHub issue llvm#53389

Reviewed By: aartbik, mehdi_amini

Differential Revision: https://reviews.llvm.org/D123876
1 parent 807e418 · commit d59cf90

27 files changed (+86, -99 lines)
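For illustration, the old numeric strategy codes map to named values on the command line as follows. These invocations are drawn from the updated RUN lines in the tests below; `input.mlir` is a placeholder standing in for the lit substitution `%s`:

  Before: mlir-opt input.mlir -sparsification="parallelization-strategy=4"
  After:  mlir-opt input.mlir -sparsification="parallelization-strategy=any-storage-any-loop"

  Before: mlir-opt input.mlir -sparsification="vectorization-strategy=2 vl=8"
  After:  mlir-opt input.mlir -sparsification="vectorization-strategy=any-storage-inner-loop vl=8"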

mlir/benchmark/python/common.py

Lines changed: 1 addition & 1 deletion
@@ -16,7 +16,7 @@ def setup_passes(mlir_module):
     """Setup pass pipeline parameters for benchmark functions.
     """
     opt = (
-        "parallelization-strategy=0"
+        "parallelization-strategy=none"
         " vectorization-strategy=0 vl=1 enable-simd-index32=False"
     )
     pipeline = f"sparse-compiler{{{opt}}}"

mlir/include/mlir/Dialect/SparseTensor/Pipelines/Passes.h

Lines changed: 8 additions & 8 deletions
@@ -30,12 +30,14 @@ namespace sparse_tensor {
 struct SparseCompilerOptions
     : public PassPipelineOptions<SparseCompilerOptions> {
   // These options must be kept in sync with `SparsificationBase`.
-  PassOptions::Option<int32_t> parallelization{
+
+  PassOptions::Option<enum SparseParallelizationStrategy> parallelization{
       *this, "parallelization-strategy",
-      desc("Set the parallelization strategy"), init(0)};
-  PassOptions::Option<int32_t> vectorization{
+      desc("Set the parallelization strategy"),
+      init(SparseParallelizationStrategy::kNone)};
+  PassOptions::Option<enum SparseVectorizationStrategy> vectorization{
       *this, "vectorization-strategy", desc("Set the vectorization strategy"),
-      init(0)};
+      init(SparseVectorizationStrategy::kNone)};
   PassOptions::Option<int32_t> vectorLength{
       *this, "vl", desc("Set the vector length"), init(1)};
   PassOptions::Option<bool> enableSIMDIndex32{
@@ -47,10 +49,8 @@ struct SparseCompilerOptions
 
   /// Projects out the options for `createSparsificationPass`.
   SparsificationOptions sparsificationOptions() const {
-    return SparsificationOptions(sparseParallelizationStrategy(parallelization),
-                                 sparseVectorizationStrategy(vectorization),
-                                 vectorLength, enableSIMDIndex32,
-                                 enableVLAVectorization);
+    return SparsificationOptions(parallelization, vectorization, vectorLength,
+                                 enableSIMDIndex32, enableVLAVectorization);
   }
 
   // These options must be kept in sync with `SparseTensorConversionBase`.

mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h

Lines changed: 0 additions & 6 deletions
@@ -45,9 +45,6 @@ enum class SparseParallelizationStrategy {
   // TODO: support reduction parallelization too?
 };
 
-/// Converts command-line parallelization flag to the strategy enum.
-SparseParallelizationStrategy sparseParallelizationStrategy(int32_t flag);
-
 /// Defines a vectorization strategy. Any inner loop is a candidate (full SIMD
 /// for parallel loops and horizontal SIMD for reduction loops). A loop is
 /// actually vectorized if (1) allowed by the strategy, and (2) the emitted
@@ -58,9 +55,6 @@ enum class SparseVectorizationStrategy {
   kAnyStorageInnerLoop
 };
 
-/// Converts command-line vectorization flag to the strategy enum.
-SparseVectorizationStrategy sparseVectorizationStrategy(int32_t flag);
-
 /// Options for the Sparsification pass.
 struct SparsificationOptions {
   SparsificationOptions(SparseParallelizationStrategy p,

mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td

Lines changed: 28 additions & 4 deletions
@@ -63,10 +63,34 @@ def Sparsification : Pass<"sparsification", "ModuleOp"> {
     "vector::VectorDialect",
   ];
   let options = [
-    Option<"parallelization", "parallelization-strategy", "int32_t", "0",
-           "Set the parallelization strategy">,
-    Option<"vectorization", "vectorization-strategy", "int32_t", "0",
-           "Set the vectorization strategy">,
+    Option<"parallelization", "parallelization-strategy", "enum SparseParallelizationStrategy",
+           "mlir::SparseParallelizationStrategy::kNone",
+           "Set the parallelization strategy", [{llvm::cl::values(
+             clEnumValN(mlir::SparseParallelizationStrategy::kNone, "none",
+                        "Turn off sparse parallelization."),
+             clEnumValN(mlir::SparseParallelizationStrategy::kDenseOuterLoop,
+                        "dense-outer-loop",
+                        "Enable dense outer loop sparse parallelization."),
+             clEnumValN(mlir::SparseParallelizationStrategy::kAnyStorageOuterLoop,
+                        "any-storage-outer-loop",
+                        "Enable sparse parallelization regardless of storage for the outer loop."),
+             clEnumValN(mlir::SparseParallelizationStrategy::kDenseAnyLoop,
+                        "dense-any-loop",
+                        "Enable dense parallelization for any loop."),
+             clEnumValN(mlir::SparseParallelizationStrategy::kAnyStorageAnyLoop,
+                        "any-storage-any-loop",
+                        "Enable sparse parallelization for any storage and loop."))}]>,
+    Option<"vectorization", "vectorization-strategy", "enum SparseVectorizationStrategy",
+           "mlir::SparseVectorizationStrategy::kNone",
+           "Set the vectorization strategy", [{llvm::cl::values(
+             clEnumValN(mlir::SparseVectorizationStrategy::kNone, "none",
+                        "Turn off sparse vectorization."),
+             clEnumValN(mlir::SparseVectorizationStrategy::kDenseInnerLoop,
+                        "dense-inner-loop",
+                        "Enable vectorization for dense inner loops."),
+             clEnumValN(mlir::SparseVectorizationStrategy::kAnyStorageInnerLoop,
+                        "any-storage-inner-loop",
+                        "Enable sparse vectorization for inner loops with any storage."))}]>,
     Option<"vectorLength", "vl", "int32_t", "1",
            "Set the vector length">,
     Option<"enableSIMDIndex32", "enable-simd-index32", "bool", "false",

mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp

Lines changed: 4 additions & 33 deletions
@@ -39,8 +39,8 @@ struct SparsificationPass : public SparsificationBase<SparsificationPass> {
   SparsificationPass() = default;
   SparsificationPass(const SparsificationPass &pass) = default;
   SparsificationPass(const SparsificationOptions &options) {
-    parallelization = static_cast<int32_t>(options.parallelizationStrategy);
-    vectorization = static_cast<int32_t>(options.vectorizationStrategy);
+    parallelization = options.parallelizationStrategy;
+    vectorization = options.vectorizationStrategy;
     vectorLength = options.vectorLength;
     enableSIMDIndex32 = options.enableSIMDIndex32;
     enableVLAVectorization = options.enableVLAVectorization;
@@ -50,10 +50,8 @@ struct SparsificationPass : public SparsificationBase<SparsificationPass> {
     auto *ctx = &getContext();
     RewritePatternSet patterns(ctx);
     // Translate strategy flags to strategy options.
-    SparsificationOptions options(
-        sparseParallelizationStrategy(parallelization),
-        sparseVectorizationStrategy(vectorization), vectorLength,
-        enableSIMDIndex32, enableVLAVectorization);
+    SparsificationOptions options(parallelization, vectorization, vectorLength,
+                                  enableSIMDIndex32, enableVLAVectorization);
     // Apply rewriting.
     populateSparsificationPatterns(patterns, options);
     vector::populateVectorToVectorCanonicalizationPatterns(patterns);
@@ -133,33 +131,6 @@ struct SparseTensorConversionPass
 
 } // namespace
 
-SparseParallelizationStrategy
-mlir::sparseParallelizationStrategy(int32_t flag) {
-  switch (flag) {
-  default:
-    return SparseParallelizationStrategy::kNone;
-  case 1:
-    return SparseParallelizationStrategy::kDenseOuterLoop;
-  case 2:
-    return SparseParallelizationStrategy::kAnyStorageOuterLoop;
-  case 3:
-    return SparseParallelizationStrategy::kDenseAnyLoop;
-  case 4:
-    return SparseParallelizationStrategy::kAnyStorageAnyLoop;
-  }
-}
-
-SparseVectorizationStrategy mlir::sparseVectorizationStrategy(int32_t flag) {
-  switch (flag) {
-  default:
-    return SparseVectorizationStrategy::kNone;
-  case 1:
-    return SparseVectorizationStrategy::kDenseInnerLoop;
-  case 2:
-    return SparseVectorizationStrategy::kAnyStorageInnerLoop;
-  }
-}
-
 SparseToSparseConversionStrategy
 mlir::sparseToSparseConversionStrategy(int32_t flag) {
   switch (flag) {

mlir/test/Dialect/SparseTensor/sparse_parallel.mlir

Lines changed: 5 additions & 5 deletions
@@ -1,12 +1,12 @@
-// RUN: mlir-opt %s -sparsification="parallelization-strategy=0" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=none" | \
 // RUN: FileCheck %s --check-prefix=CHECK-PAR0
-// RUN: mlir-opt %s -sparsification="parallelization-strategy=1" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=dense-outer-loop" | \
 // RUN: FileCheck %s --check-prefix=CHECK-PAR1
-// RUN: mlir-opt %s -sparsification="parallelization-strategy=2" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=any-storage-outer-loop" | \
 // RUN: FileCheck %s --check-prefix=CHECK-PAR2
-// RUN: mlir-opt %s -sparsification="parallelization-strategy=3" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=dense-any-loop" | \
 // RUN: FileCheck %s --check-prefix=CHECK-PAR3
-// RUN: mlir-opt %s -sparsification="parallelization-strategy=4" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=any-storage-any-loop" | \
 // RUN: FileCheck %s --check-prefix=CHECK-PAR4
 
 #DenseMatrix = #sparse_tensor.encoding<{

mlir/test/Dialect/SparseTensor/sparse_vector.mlir

Lines changed: 5 additions & 5 deletions
@@ -1,12 +1,12 @@
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=0 vl=16" -cse -split-input-file | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=none vl=16" -cse -split-input-file | \
 // RUN: FileCheck %s --check-prefix=CHECK-VEC0
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=1 vl=16" -cse -split-input-file | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=dense-inner-loop vl=16" -cse -split-input-file | \
 // RUN: FileCheck %s --check-prefix=CHECK-VEC1
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=16" -cse -split-input-file | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=16" -cse -split-input-file | \
 // RUN: FileCheck %s --check-prefix=CHECK-VEC2
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=16 enable-simd-index32=true" -cse -split-input-file | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=16 enable-simd-index32=true" -cse -split-input-file | \
 // RUN: FileCheck %s --check-prefix=CHECK-VEC3
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=4 enable-vla-vectorization=true" -cse -split-input-file | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=4 enable-vla-vectorization=true" -cse -split-input-file | \
 // RUN: FileCheck %s --check-prefix=CHECK-VEC4
 
 #DenseVector = #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>

mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 // NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
 
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=8" -canonicalize | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=8" -canonicalize | \
 // RUN: FileCheck %s
 
 #SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["dense","compressed"]}>

mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
 // about what constitutes a good test! The CHECK should be
 // minimized and named to reflect the test intent.
 
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=8" -canonicalize | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=8" -canonicalize | \
 // RUN: FileCheck %s
 
 #SparseVector = #sparse_tensor.encoding<{

mlir/test/Dialect/SparseTensor/sparse_vector_peeled.mlir

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=16" -scf-for-loop-peeling -canonicalize | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=16" -scf-for-loop-peeling -canonicalize | \
 // RUN: FileCheck %s
 
 #SparseVector = #sparse_tensor.encoding<{
