mlir/test/Dialect/SparseTensor/sparse_parallel.mlir (10 changes: 5 additions & 5 deletions)
@@ -1,12 +1,12 @@
-// RUN: mlir-opt %s -sparsification="parallelization-strategy=none" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=0" | \
// RUN: FileCheck %s --check-prefix=CHECK-PAR0
-// RUN: mlir-opt %s -sparsification="parallelization-strategy=dense-outer-loop" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=1" | \
// RUN: FileCheck %s --check-prefix=CHECK-PAR1
-// RUN: mlir-opt %s -sparsification="parallelization-strategy=any-storage-outer-loop" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=2" | \
// RUN: FileCheck %s --check-prefix=CHECK-PAR2
-// RUN: mlir-opt %s -sparsification="parallelization-strategy=dense-any-loop" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=3" | \
// RUN: FileCheck %s --check-prefix=CHECK-PAR3
-// RUN: mlir-opt %s -sparsification="parallelization-strategy=any-storage-any-loop" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=4" | \
// RUN: FileCheck %s --check-prefix=CHECK-PAR4

#DenseMatrix = #sparse_tensor.encoding<{
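Note for readers updating their own invocations: read off the RUN-line pairs above, the numeric parallelization-strategy values replace the former symbolic names one for one. A small reference sketch; the dictionary below is illustrative only, not part of the patch or of any MLIR API.

```python
# Parallelization strategies: old symbolic name -> numeric value now expected,
# as read off the RUN-line changes in sparse_parallel.mlir above.
PARALLELIZATION_STRATEGY = {
    'none': 0,
    'dense-outer-loop': 1,
    'any-storage-outer-loop': 2,
    'dense-any-loop': 3,
    'any-storage-any-loop': 4,
}
```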
mlir/test/Dialect/SparseTensor/sparse_vector.mlir (10 changes: 5 additions & 5 deletions)
@@ -1,12 +1,12 @@
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=none vl=16" -cse -split-input-file | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=0 vl=16" -cse -split-input-file | \
// RUN: FileCheck %s --check-prefix=CHECK-VEC0
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=dense-inner-loop vl=16" -cse -split-input-file | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=1 vl=16" -cse -split-input-file | \
// RUN: FileCheck %s --check-prefix=CHECK-VEC1
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=16" -cse -split-input-file | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=16" -cse -split-input-file | \
// RUN: FileCheck %s --check-prefix=CHECK-VEC2
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=16 enable-simd-index32=true" -cse -split-input-file | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=16 enable-simd-index32=true" -cse -split-input-file | \
// RUN: FileCheck %s --check-prefix=CHECK-VEC3
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=4 enable-vla-vectorization=true" -cse -split-input-file | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=4 enable-vla-vectorization=true" -cse -split-input-file | \
// RUN: FileCheck %s --check-prefix=CHECK-VEC4

#DenseVector = #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>
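The vectorization-strategy values map the same way. The sketch below collects that mapping together with a small helper that builds an option string in the form used by the RUN lines above and by the Python drivers later in this patch; the dictionary and function names are illustrative, not part of the patch.

```python
# Vectorization strategies: old symbolic name -> numeric value, as read off
# the RUN-line changes in sparse_vector.mlir above.
VECTORIZATION_STRATEGY = {
    'none': 0,
    'dense-inner-loop': 1,
    'any-storage-inner-loop': 2,
}


def sparsification_options(par=0, vec=0, vl=1, enable_simd_index32=False):
  """Builds an option string like the ones passed to -sparsification above."""
  return (f'parallelization-strategy={par} '
          f'vectorization-strategy={vec} '
          f'vl={vl} enable-simd-index32={enable_simd_index32}')


# Example: the vectorization-strategy=2 vl=16 configuration of CHECK-VEC2,
# with the baseline values used elsewhere in this patch for the other options.
print(sparsification_options(vec=VECTORIZATION_STRATEGY['any-storage-inner-loop'], vl=16))
# parallelization-strategy=0 vectorization-strategy=2 vl=16 enable-simd-index32=False
```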
mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir (2 changes: 1 addition & 1 deletion)
@@ -1,6 +1,6 @@
// NOTE: Assertions have been autogenerated by utils/generate-test-checks.py

-// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=8" -canonicalize | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=8" -canonicalize | \
// RUN: FileCheck %s

#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["dense","compressed"]}>
mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir (2 changes: 1 addition & 1 deletion)
@@ -5,7 +5,7 @@
// about what constitutes a good test! The CHECK should be
// minimized and named to reflect the test intent.

-// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=8" -canonicalize | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=8" -canonicalize | \
// RUN: FileCheck %s

#SparseVector = #sparse_tensor.encoding<{
mlir/test/Dialect/SparseTensor/sparse_vector_peeled.mlir (2 changes: 1 addition & 1 deletion)
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -sparsification="vectorization-strategy=any-storage-inner-loop vl=16" -scf-for-loop-peeling -canonicalize | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=16" -scf-for-loop-peeling -canonicalize | \
// RUN: FileCheck %s

#SparseVector = #sparse_tensor.encoding<{
@@ -6,7 +6,7 @@
//
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=2" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=2" | \
// RUN: mlir-cpu-runner \
// RUN: -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
@@ -5,7 +5,7 @@
//
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=2" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=2" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
@@ -7,7 +7,7 @@
//
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=4" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=4" | \
// RUN: TENSOR0="%mlir_integration_test_dir/data/test.tns" \
// RUN: mlir-cpu-runner \
// RUN: -e entry -entry-point-result=void \
@@ -5,7 +5,7 @@
//
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=4" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=4" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
@@ -8,7 +8,7 @@
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
// RUN: mlir-opt %s \
-// RUN:   --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=16 enable-simd-index32" | \
+// RUN:   --sparse-compiler="vectorization-strategy=2 vl=16 enable-simd-index32" | \
// RUN: TENSOR0="%mlir_integration_test_dir/data/wide.mtx" \
// RUN: mlir-cpu-runner \
// RUN: -e entry -entry-point-result=void \
@@ -7,7 +7,7 @@
//
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=4" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=4" | \
// RUN: TENSOR0="%mlir_integration_test_dir/data/mttkrp_b.tns" \
// RUN: mlir-cpu-runner \
// RUN: -e entry -entry-point-result=void \
@@ -7,7 +7,7 @@
//
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=4" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=4" | \
// RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
// RUN: mlir-cpu-runner \
// RUN: -e entry -entry-point-result=void \
@@ -5,7 +5,7 @@
//
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=2" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=2" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
@@ -5,7 +5,7 @@
//
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=8" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=8" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
@@ -8,7 +8,7 @@
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
// RUN: mlir-opt %s \
-// RUN:   --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=4 enable-simd-index32" | \
+// RUN:   --sparse-compiler="vectorization-strategy=2 vl=4 enable-simd-index32" | \
// RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
// RUN: mlir-cpu-runner \
// RUN: -e entry -entry-point-result=void \
@@ -5,7 +5,7 @@
//
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=8" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=8" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
@@ -6,7 +6,7 @@
//
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=4" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=4" | \
// RUN: mlir-cpu-runner \
// RUN: -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
@@ -7,7 +7,7 @@
//
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=2" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=2" | \
// RUN: TENSOR0="%mlir_integration_test_dir/data/wide.mtx" \
// RUN: mlir-cpu-runner \
// RUN: -e entry -entry-point-result=void \
@@ -7,7 +7,7 @@
//
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=any-storage-inner-loop vl=2" | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=2" | \
// RUN: TENSOR0="%mlir_integration_test_dir/data/test_symmetric.mtx" \
// RUN: mlir-cpu-runner \
// RUN: -e entry -entry-point-result=void \
mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py (26 changes: 12 additions & 14 deletions)
@@ -140,24 +140,22 @@ def main():
        ir.AffineMap.get_permutation([0, 1]),
        ir.AffineMap.get_permutation([1, 0])
    ]
-    vec_strategy = [
-        'none', 'dense-inner-loop'
-    ]
    for level in levels:
      for ordering in orderings:
        for pwidth in [32]:
          for iwidth in [32]:
-            for vec in vec_strategy:
-              for e in [True]:
-                vl = 1 if vec == 0 else 16
-                attr = st.EncodingAttr.get(level, ordering, pwidth, iwidth)
-                opt = (f'parallelization-strategy=none '
-                       f'vectorization-strategy={vec} '
-                       f'vl={vl} enable-simd-index32={e}')
-                compiler = sparse_compiler.SparseCompiler(
-                    options=opt, opt_level=0, shared_libs=[support_lib])
-                build_compile_and_run_SDDMMM(attr, compiler)
-                count = count + 1
+            for par in [0]:
+              for vec in [0, 1]:
+                for e in [True]:
+                  vl = 1 if vec == 0 else 16
+                  attr = st.EncodingAttr.get(level, ordering, pwidth, iwidth)
+                  opt = (f'parallelization-strategy={par} '
+                         f'vectorization-strategy={vec} '
+                         f'vl={vl} enable-simd-index32={e}')
+                  compiler = sparse_compiler.SparseCompiler(
+                      options=opt, opt_level=0, shared_libs=[support_lib])
+                  build_compile_and_run_SDDMMM(attr, compiler)
+                  count = count + 1
    # CHECK: Passed 16 tests
    print('Passed ', count, 'tests')

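The restructured loop nest above swaps the two-entry `vec_strategy` list for `for par in [0]` and `for vec in [0, 1]`, so the product of loop trip counts is unchanged and the untouched `# CHECK: Passed 16 tests` line still holds. A quick sanity check follows; the size of the `levels` list is defined outside this hunk, so four entries is an assumption.

```python
# Configurations enumerated by the new loop nest in test_SDDMM.py.
# Assumption: len(levels) == 4; the list itself is not shown in this hunk.
levels, orderings = 4, 2   # level combinations (assumed), two orderings shown above
pwidths, iwidths = 1, 1    # [32] and [32]
pars, vecs, es = 1, 2, 1   # [0], [0, 1], [True]
print(levels * orderings * pwidths * iwidths * pars * vecs * es)  # 16
```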
@@ -120,10 +120,12 @@ def main():
    # a *single* sparse tensor. Note that we deliberate do not exhaustively
    # search the full state space to reduce runtime of the test. It is
    # straightforward to adapt the code below to explore more combinations.
+    par = 0
+    vec = 0
    vl = 1
    e = False
-    opt = (f'parallelization-strategy=none '
-           f'vectorization-strategy=none '
+    opt = (f'parallelization-strategy={par} '
+           f'vectorization-strategy={vec} '
           f'vl={vl} enable-simd-index32={e}')
    levels = [[st.DimLevelType.dense, st.DimLevelType.dense],
              [st.DimLevelType.dense, st.DimLevelType.compressed],
@@ -182,11 +182,13 @@ def main():
  # CHECK-LABEL: TEST: test_stress
  print("\nTEST: test_stress")
  with ir.Context() as ctx, ir.Location.unknown():
+    par = 0
+    vec = 0
    vl = 1
    e = False
    sparsification_options = (
-        f'parallelization-strategy=none '
-        f'vectorization-strategy=none '
+        f'parallelization-strategy={par} '
+        f'vectorization-strategy={vec} '
        f'vl={vl} '
        f'enable-simd-index32={e}')
    compiler = sparse_compiler.SparseCompiler(
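For reference, with the values set in the two Python hunks above (par=0, vec=0, vl=1, e=False), the rewritten f-strings expand as shown in this small worked example.

```python
# Worked expansion of the option strings built in the two hunks above.
par, vec, vl, e = 0, 0, 1, False
print(f'parallelization-strategy={par} '
      f'vectorization-strategy={vec} '
      f'vl={vl} enable-simd-index32={e}')
# parallelization-strategy=0 vectorization-strategy=0 vl=1 enable-simd-index32=False
```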