[mlir][sparse] Replace vector.print with printMemref for some tests.
Reviewed By: aartbik

Differential Revision: https://reviews.llvm.org/D139489
bixia1 committed Dec 13, 2022
1 parent f58e457 commit efaa78c
Showing 9 changed files with 328 additions and 228 deletions.
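The recurring pattern across these tests: instead of materializing a fixed-width vector from the values buffer and printing it with vector.print (which hard-codes the buffer size into the test), the tests now call the runner-utils print helpers, which print the buffer at its runtime size. A minimal sketch of the before/after, using the helper name and attribute exactly as they appear in the diffs below (the symbol resolves against libmlir_runner_utils when the test runs):

module {
  func.func private @printMemref1dF64(%ptr : memref<?xf64>) attributes { llvm.emit_c_interface }

  func.func @dump(%m : memref<?xf64>) {
    // Old style: load a fixed-size vector from the buffer, then print it.
    //   %c0 = arith.constant 0 : index
    //   %v = vector.load %m[%c0] : memref<?xf64>, vector<25xf64>
    //   vector.print %v : vector<25xf64>
    // New style: print the buffer directly at whatever its runtime size is.
    call @printMemref1dF64(%m) : (memref<?xf64>) -> ()
    return
  }
}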
222 changes: 143 additions & 79 deletions mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate.mlir

Large diffs are not rendered by default.

12 changes: 7 additions & 5 deletions mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
@@ -3,7 +3,7 @@
// DEFINE: TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" \
// DEFINE: mlir-cpu-runner \
// DEFINE: -e entry -entry-point-result=void \
// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext,%mlir_lib_dir/libmlir_runner_utils%shlibext | \
// DEFINE: FileCheck %s
//
// RUN: %{command}
@@ -66,6 +66,7 @@ module {
}

func.func private @getTensorFilename(index) -> (!Filename)
func.func private @printMemref1dF64(%ptr : memref<?xf64>) attributes { llvm.emit_c_interface }

//
// Main driver that reads matrix from file and calls the kernel.
@@ -86,13 +87,14 @@ module {

//
// Print the linearized 5x5 result for verification.
// CHECK: 25
// CHECK: [2, 0, 0, 2.8, 0, 0, 4, 0, 0, 5, 0, 0, 6, 0, 0, 8.2, 0, 0, 8, 0, 0, 10.4, 0, 0, 10
//
// CHECK: ( 2, 0, 0, 2.8, 0, 0, 4, 0, 0, 5, 0, 0, 6, 0, 0, 8.2, 0, 0, 8, 0, 0, 10.4, 0, 0, 10 )
//
%n = sparse_tensor.number_of_entries %0 : tensor<?x?xf64, #DenseMatrix>
vector.print %n : index
%m = sparse_tensor.values %0
: tensor<?x?xf64, #DenseMatrix> to memref<?xf64>
%v = vector.load %m[%c0] : memref<?xf64>, vector<25xf64>
vector.print %v : vector<25xf64>
call @printMemref1dF64(%m) : (memref<?xf64>) -> ()

// Release the resources.
bufferization.dealloc_tensor %a : tensor<?x?xf64, #SparseMatrix>
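(The -shared-libs changes in each RUN line follow from this: the printMemref* helpers live in the runner-utils library rather than the C runner utils, so libmlir_runner_utils is now linked in addition to libmlir_c_runner_utils.)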
@@ -2,7 +2,7 @@
// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \
// DEFINE: mlir-cpu-runner \
// DEFINE: -e entry -entry-point-result=void \
// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext,%mlir_lib_dir/libmlir_runner_utils%shlibext | \
// DEFINE: FileCheck %s
//
// RUN: %{command}
@@ -27,15 +27,14 @@
//
module {

func.func private @printMemref1dF64(%ptr : memref<?xf64>) attributes { llvm.emit_c_interface }

//
// Helper method to print the values array.
//
func.func @dump(%arg0: memref<?xf64>) {
%c = arith.constant 0 : index
%d = arith.constant 0.0 : f64
%0 = vector.transfer_read %arg0[%c], %d: memref<?xf64>, vector<8xf64>
vector.print %0 : vector<8xf64>
call @printMemref1dF64(%arg0) : (memref<?xf64>) -> ()
return
}

@@ -55,15 +54,32 @@ module {
%5 = sparse_tensor.convert %3 : tensor<?x?xf64, #DCSR> to tensor<?x?xf64, #DCSC>
%6 = sparse_tensor.convert %4 : tensor<?x?xf64, #DCSC> to tensor<?x?xf64, #DCSR>

//
// Check number_of_entries.
//
// CHECK-COUNT-6: 7
%n1 = sparse_tensor.number_of_entries %1 : tensor<?x?xf64, #DCSR>
%n2 = sparse_tensor.number_of_entries %2 : tensor<?x?xf64, #DCSC>
%n3 = sparse_tensor.number_of_entries %3 : tensor<?x?xf64, #DCSR>
%n4 = sparse_tensor.number_of_entries %4 : tensor<?x?xf64, #DCSC>
%n5 = sparse_tensor.number_of_entries %5 : tensor<?x?xf64, #DCSC>
%n6 = sparse_tensor.number_of_entries %6 : tensor<?x?xf64, #DCSR>
vector.print %n1 : index
vector.print %n2 : index
vector.print %n3 : index
vector.print %n4 : index
vector.print %n5 : index
vector.print %n6 : index

//
// All proper row-/column-wise?
//
// CHECK: ( 1, 2, 3, 4, 5, 6, 7, 0 )
// CHECK: ( 1, 4, 6, 2, 5, 3, 7, 0 )
// CHECK: ( 1, 2, 3, 4, 5, 6, 7, 0 )
// CHECK: ( 1, 4, 6, 2, 5, 3, 7, 0 )
// CHECK: ( 1, 4, 6, 2, 5, 3, 7, 0 )
// CHECK: ( 1, 2, 3, 4, 5, 6, 7, 0 )
// CHECK: [1, 2, 3, 4, 5, 6, 7
// CHECK: [1, 4, 6, 2, 5, 3, 7
// CHECK: [1, 2, 3, 4, 5, 6, 7
// CHECK: [1, 4, 6, 2, 5, 3, 7
// CHECK: [1, 4, 6, 2, 5, 3, 7
// CHECK: [1, 2, 3, 4, 5, 6, 7
//
%m1 = sparse_tensor.values %1 : tensor<?x?xf64, #DCSR> to memref<?xf64>
%m2 = sparse_tensor.values %2 : tensor<?x?xf64, #DCSC> to memref<?xf64>
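(vector.print remains in use for scalars: the six number_of_entries results above are index values printed one per line, which the single CHECK-COUNT-6 directive matches six times.)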
25 changes: 13 additions & 12 deletions mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir
@@ -2,7 +2,7 @@
// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \
// DEFINE: mlir-cpu-runner \
// DEFINE: -e entry -entry-point-result=void \
// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext,%mlir_lib_dir/libmlir_runner_utils%shlibext | \
// DEFINE: FileCheck %s
//
// RUN: %{command}
@@ -17,6 +17,8 @@
}>

module {
func.func private @printMemrefF64(%ptr : tensor<*xf64>)

//
// Column-wise storage forces the ijk loop to permute into jki
// so that access pattern expansion (workspace) needs to be
@@ -63,19 +65,18 @@ module {
: (tensor<8x2xf64, #CSC>,
tensor<2x4xf64, #CSC>) -> tensor<8x4xf64, #CSC>

//
// CHECK: ( ( 32.53, 35.73, 38.93, 42.13 ),
// CHECK-SAME: ( 34.56, 37.96, 41.36, 44.76 ),
// CHECK-SAME: ( 36.59, 40.19, 43.79, 47.39 ),
// CHECK-SAME: ( 38.62, 42.42, 46.22, 50.02 ),
// CHECK-SAME: ( 40.65, 44.65, 48.65, 52.65 ),
// CHECK-SAME: ( 42.68, 46.88, 51.08, 55.28 ),
// CHECK-SAME: ( 44.71, 49.11, 53.51, 57.91 ),
// CHECK-SAME: ( 46.74, 51.34, 55.94, 60.54 ) )
// CHECK: {{\[}}[32.53, 35.73, 38.93, 42.13],
// CHECK-NEXT: [34.56, 37.96, 41.36, 44.76],
// CHECK-NEXT: [36.59, 40.19, 43.79, 47.39],
// CHECK-NEXT: [38.62, 42.42, 46.22, 50.02],
// CHECK-NEXT: [40.65, 44.65, 48.65, 52.65],
// CHECK-NEXT: [42.68, 46.88, 51.08, 55.28],
// CHECK-NEXT: [44.71, 49.11, 53.51, 57.91],
// CHECK-NEXT: [46.74, 51.34, 55.94, 60.54]]
//
%xc = sparse_tensor.convert %x3 : tensor<8x4xf64, #CSC> to tensor<8x4xf64>
%xv = vector.transfer_read %xc[%c0, %c0], %d1 : tensor<8x4xf64>, vector<8x4xf64>
vector.print %xv : vector<8x4xf64>
%xu = tensor.cast %xc : tensor<8x4xf64> to tensor<*xf64>
call @printMemrefF64(%xu) : (tensor<*xf64>) -> ()

// Release the resources.
bufferization.dealloc_tensor %x1 : tensor<8x2xf64, #CSC>
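For 2-D results the tests use the unranked variant instead: cast away the static shape with tensor.cast, then call @printMemrefF64, which handles any rank and size. A sketch of the idiom in isolation, with the types taken from the test above:

module {
  func.func private @printMemrefF64(%ptr : tensor<*xf64>)

  func.func @dump2d(%t : tensor<8x4xf64>) {
    // Casting to an unranked tensor lets one helper print any shape.
    %u = tensor.cast %t : tensor<8x4xf64> to tensor<*xf64>
    call @printMemrefF64(%u) : (tensor<*xf64>) -> ()
    return
  }
}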
23 changes: 11 additions & 12 deletions mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
@@ -3,7 +3,7 @@
// DEFINE: TENSOR0="%mlir_src_dir/test/Integration/data/test.tns" \
// DEFINE: mlir-cpu-runner \
// DEFINE: -e entry -entry-point-result=void \
// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext,%mlir_lib_dir/libmlir_runner_utils%shlibext | \
// DEFINE: FileCheck %s
//
// RUN: %{command}
@@ -56,6 +56,7 @@ module {
}

func.func private @getTensorFilename(index) -> (!Filename)
func.func private @printMemrefF64(%ptr : tensor<*xf64>)

//
// Main driver that reads tensor from file and calls the sparse kernel.
@@ -80,18 +81,16 @@ module {

// Print the result for verification.
//
// CHECK: ( 6.25, 0, 0 )
// CHECK: ( 4.224, 6.21, 0 )
// CHECK: ( 0, 0, 15.455 )
// CHECK: ( 0, 0, 0 )
// CHECK: ( 0, 0, 0 )
// CHECK: ( 0, 0, 0 )
// CHECK: ( 7, 0, 0 )
// CHECK: {{\[}}[6.25, 0, 0],
// CHECK-NEXT: [4.224, 6.21, 0],
// CHECK-NEXT: [0, 0, 15.455],
// CHECK-NEXT: [0, 0, 0],
// CHECK-NEXT: [0, 0, 0],
// CHECK-NEXT: [0, 0, 0],
// CHECK-NEXT: [7, 0, 0]]
//
scf.for %i = %c0 to %c7 step %c1 {
%v = vector.transfer_read %0[%i, %c0], %d0: tensor<7x3xf64>, vector<3xf64>
vector.print %v : vector<3xf64>
}
%1 = tensor.cast %0 : tensor<7x3xf64> to tensor<*xf64>
call @printMemrefF64(%1) : (tensor<*xf64>) -> ()

// Release the resources.
bufferization.dealloc_tensor %a : tensor<7x3x3x3x3x3x5x3xf64, #SparseTensor>
119 changes: 64 additions & 55 deletions mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
@@ -2,21 +2,21 @@
// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \
// DEFINE: mlir-cpu-runner \
// DEFINE: -e entry -entry-point-result=void \
// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext,%mlir_lib_dir/libmlir_runner_utils%shlibext | \
// DEFINE: FileCheck %s
//
// RUN: %{command}
//
// Do the same run, but now with direct IR generation.
// REDEFINE: %{option} = enable-runtime-library=false
// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true"
// RUN: %{command}
//
// Do the same run, but now with parallelization strategy.
// REDEFINE: %{option} = "enable-runtime-library=true parallelization-strategy=any-storage-any-loop"
// RUN: %{command}
//
// Do the same run, but now with direct IR generation and parallelization strategy.
// REDEFINE: %{option} = "enable-runtime-library=false parallelization-strategy=any-storage-any-loop"
// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true parallelization-strategy=any-storage-any-loop"
// RUN: %{command}

#CSR = #sparse_tensor.encoding<{
@@ -30,6 +30,9 @@
}>

module {
func.func private @printMemrefF64(%ptr : tensor<*xf64>)
func.func private @printMemref1dF64(%ptr : memref<?xf64>) attributes { llvm.emit_c_interface }

//
// Computes C = A x B with all matrices dense.
//
@@ -70,7 +73,6 @@ module {
//
func.func @entry() {
%c0 = arith.constant 0 : index
%d1 = arith.constant -1.0 : f64

// Initialize various matrices, dense for stress testing,
// and sparse to verify correct nonzero structure.
@@ -178,95 +180,102 @@ module {
tensor<8x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR>

//
// CHECK: ( ( 388.76, 425.56, 462.36, 499.16 ),
// CHECK-SAME: ( 397.12, 434.72, 472.32, 509.92 ),
// CHECK-SAME: ( 405.48, 443.88, 482.28, 520.68 ),
// CHECK-SAME: ( 413.84, 453.04, 492.24, 531.44 ) )
// CHECK: {{\[}}[388.76, 425.56, 462.36, 499.16],
// CHECK-NEXT: [397.12, 434.72, 472.32, 509.92],
// CHECK-NEXT: [405.48, 443.88, 482.28, 520.68],
// CHECK-NEXT: [413.84, 453.04, 492.24, 531.44]]
//
%v0 = vector.transfer_read %0[%c0, %c0], %d1 : tensor<4x4xf64>, vector<4x4xf64>
vector.print %v0 : vector<4x4xf64>
%u0 = tensor.cast %0 : tensor<4x4xf64> to tensor<*xf64>
call @printMemrefF64(%u0) : (tensor<*xf64>) -> ()

//
// CHECK: ( ( 388.76, 425.56, 462.36, 499.16 ),
// CHECK-SAME: ( 397.12, 434.72, 472.32, 509.92 ),
// CHECK-SAME: ( 405.48, 443.88, 482.28, 520.68 ),
// CHECK-SAME: ( 413.84, 453.04, 492.24, 531.44 ) )
// CHECK: {{\[}}[388.76, 425.56, 462.36, 499.16],
// CHECK-NEXT: [397.12, 434.72, 472.32, 509.92],
// CHECK-NEXT: [405.48, 443.88, 482.28, 520.68],
// CHECK-NEXT: [413.84, 453.04, 492.24, 531.44]]
//
%c1 = sparse_tensor.convert %1 : tensor<4x4xf64, #CSR> to tensor<4x4xf64>
%v1 = vector.transfer_read %c1[%c0, %c0], %d1 : tensor<4x4xf64>, vector<4x4xf64>
vector.print %v1 : vector<4x4xf64>
%c1u = tensor.cast %c1 : tensor<4x4xf64> to tensor<*xf64>
call @printMemrefF64(%c1u) : (tensor<*xf64>) -> ()

//
// CHECK: ( ( 388.76, 425.56, 462.36, 499.16 ),
// CHECK-SAME: ( 397.12, 434.72, 472.32, 509.92 ),
// CHECK-SAME: ( 405.48, 443.88, 482.28, 520.68 ),
// CHECK-SAME: ( 413.84, 453.04, 492.24, 531.44 ) )
// CHECK: {{\[}}[388.76, 425.56, 462.36, 499.16],
// CHECK-NEXT: [397.12, 434.72, 472.32, 509.92],
// CHECK-NEXT: [405.48, 443.88, 482.28, 520.68],
// CHECK-NEXT: [413.84, 453.04, 492.24, 531.44]]
//
%c2 = sparse_tensor.convert %2 : tensor<4x4xf64, #DCSR> to tensor<4x4xf64>
%v2 = vector.transfer_read %c2[%c0, %c0], %d1 : tensor<4x4xf64>, vector<4x4xf64>
vector.print %v2 : vector<4x4xf64>
%c2u = tensor.cast %c2 : tensor<4x4xf64> to tensor<*xf64>
call @printMemrefF64(%c2u) : (tensor<*xf64>) -> ()

//
// CHECK: ( ( 86.08, 94.28, 102.48, 110.68 ),
// CHECK-SAME: ( 0, 0, 0, 0 ),
// CHECK-SAME: ( 23.46, 25.76, 28.06, 30.36 ),
// CHECK-SAME: ( 10.8, 11.8, 12.8, 13.8 ) )
// CHECK: {{\[}}[86.08, 94.28, 102.48, 110.68],
// CHECK-NEXT: [0, 0, 0, 0],
// CHECK-NEXT: [23.46, 25.76, 28.06, 30.36],
// CHECK-NEXT: [10.8, 11.8, 12.8, 13.8]]
//
%v3 = vector.transfer_read %3[%c0, %c0], %d1 : tensor<4x4xf64>, vector<4x4xf64>
vector.print %v3 : vector<4x4xf64>
%u3 = tensor.cast %3 : tensor<4x4xf64> to tensor<*xf64>
call @printMemrefF64(%u3) : (tensor<*xf64>) -> ()

//
// CHECK: ( ( 86.08, 94.28, 102.48, 110.68 ),
// CHECK-SAME: ( 0, 0, 0, 0 ),
// CHECK-SAME: ( 23.46, 25.76, 28.06, 30.36 ),
// CHECK-SAME: ( 10.8, 11.8, 12.8, 13.8 ) )
// CHECK: {{\[}}[86.08, 94.28, 102.48, 110.68],
// CHECK-NEXT: [0, 0, 0, 0],
// CHECK-NEXT: [23.46, 25.76, 28.06, 30.36],
// CHECK-NEXT: [10.8, 11.8, 12.8, 13.8]]
//
%c4 = sparse_tensor.convert %4 : tensor<4x4xf64, #CSR> to tensor<4x4xf64>
%v4 = vector.transfer_read %c4[%c0, %c0], %d1 : tensor<4x4xf64>, vector<4x4xf64>
vector.print %v4 : vector<4x4xf64>
%c4u = tensor.cast %c4 : tensor<4x4xf64> to tensor<*xf64>
call @printMemrefF64(%c4u) : (tensor<*xf64>) -> ()

//
// CHECK: ( ( 86.08, 94.28, 102.48, 110.68 ),
// CHECK-SAME: ( 0, 0, 0, 0 ),
// CHECK-SAME: ( 23.46, 25.76, 28.06, 30.36 ),
// CHECK-SAME: ( 10.8, 11.8, 12.8, 13.8 ) )
// CHECK: {{\[}}[86.08, 94.28, 102.48, 110.68],
// CHECK-NEXT: [0, 0, 0, 0],
// CHECK-NEXT: [23.46, 25.76, 28.06, 30.36],
// CHECK-NEXT: [10.8, 11.8, 12.8, 13.8]]
//
%c5 = sparse_tensor.convert %5 : tensor<4x4xf64, #DCSR> to tensor<4x4xf64>
%v5 = vector.transfer_read %c5[%c0, %c0], %d1 : tensor<4x4xf64>, vector<4x4xf64>
vector.print %v5 : vector<4x4xf64>
%c5u = tensor.cast %c5 : tensor<4x4xf64> to tensor<*xf64>
call @printMemrefF64(%c5u) : (tensor<*xf64>) -> ()

//
// CHECK-NEXT: ( ( 0, 30.5, 4.2, 0 ), ( 0, 0, 0, 0 ), ( 0, 0, 4.6, 0 ), ( 0, 0, 7, 8 ) )
// CHECK: {{\[}}[0, 30.5, 4.2, 0],
// CHECK-NEXT: [0, 0, 0, 0],
// CHECK-NEXT: [0, 0, 4.6, 0],
// CHECK-NEXT: [0, 0, 7, 8]]
//
%v6 = vector.transfer_read %6[%c0, %c0], %d1 : tensor<4x4xf64>, vector<4x4xf64>
vector.print %v6 : vector<4x4xf64>
%u6 = tensor.cast %6 : tensor<4x4xf64> to tensor<*xf64>
call @printMemrefF64(%u6) : (tensor<*xf64>) -> ()

//
// CHECK-NEXT: ( ( 0, 30.5, 4.2, 0 ), ( 0, 0, 0, 0 ), ( 0, 0, 4.6, 0 ), ( 0, 0, 7, 8 ) )
// CHECK: {{\[}}[0, 30.5, 4.2, 0],
// CHECK-NEXT: [0, 0, 0, 0],
// CHECK-NEXT: [0, 0, 4.6, 0],
// CHECK-NEXT: [0, 0, 7, 8]]
//
%c7 = sparse_tensor.convert %7 : tensor<4x4xf64, #CSR> to tensor<4x4xf64>
%v7 = vector.transfer_read %c7[%c0, %c0], %d1 : tensor<4x4xf64>, vector<4x4xf64>
vector.print %v7 : vector<4x4xf64>
%c7u = tensor.cast %c7 : tensor<4x4xf64> to tensor<*xf64>
call @printMemrefF64(%c7u) : (tensor<*xf64>) -> ()

//
// CHECK-NEXT: ( ( 0, 30.5, 4.2, 0 ), ( 0, 0, 0, 0 ), ( 0, 0, 4.6, 0 ), ( 0, 0, 7, 8 ) )
// CHECK: {{\[}}[0, 30.5, 4.2, 0],
// CHECK-NEXT: [0, 0, 0, 0],
// CHECK-NEXT: [0, 0, 4.6, 0],
// CHECK-NEXT: [0, 0, 7, 8]]
//
%c8 = sparse_tensor.convert %8 : tensor<4x4xf64, #DCSR> to tensor<4x4xf64>
%v8 = vector.transfer_read %c8[%c0, %c0], %d1 : tensor<4x4xf64>, vector<4x4xf64>
vector.print %v8 : vector<4x4xf64>
%c8u = tensor.cast %c8 : tensor<4x4xf64> to tensor<*xf64>
call @printMemrefF64(%c8u) : (tensor<*xf64>) -> ()

//
// Sanity check on nonzeros.
//
// CHECK-NEXT: ( 30.5, 4.2, 4.6, 7, 8 )
// CHECK-NEXT: ( 30.5, 4.2, 4.6, 7, 8 )
// CHECK: [30.5, 4.2, 4.6, 7, 8
// CHECK: [30.5, 4.2, 4.6, 7, 8
//
%val7 = sparse_tensor.values %7 : tensor<4x4xf64, #CSR> to memref<?xf64>
%val8 = sparse_tensor.values %8 : tensor<4x4xf64, #DCSR> to memref<?xf64>
%nz7 = vector.transfer_read %val7[%c0], %d1 : memref<?xf64>, vector<5xf64>
%nz8 = vector.transfer_read %val8[%c0], %d1 : memref<?xf64>, vector<5xf64>
vector.print %nz7 : vector<5xf64>
vector.print %nz8 : vector<5xf64>
call @printMemref1dF64(%val7) : (memref<?xf64>) -> ()
call @printMemref1dF64(%val8) : (memref<?xf64>) -> ()

//
// Sanity check on stored entries after the computations.
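A note on the CHECK-line rewrites throughout: printMemref renders nested square brackets, and FileCheck would parse a literal [[ at the start of a match as the beginning of a variable reference, so the first bracket is escaped with a regex block:

// CHECK: {{\[}}[388.76, 425.56, 462.36, 499.16],

The parenthesized output of vector.print needed no such escaping, which is why the escape appears only in the new patterns.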
