diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
index d6b57d348493f0..7b5444900f7f79 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
@@ -24,6 +24,13 @@
   indexBitWidth = 64
 }>
 
+#CSC = #sparse_tensor.encoding<{
+  dimLevelType = [ "dense", "compressed" ],
+  dimOrdering = affine_map<(i,j) -> (j,i)>,
+  pointerBitWidth = 16,
+  indexBitWidth = 32
+}>
+
 //
 // Integration test that tests conversions between sparse tensors,
 // where the pointer and index sizes in the overhead storage change
@@ -32,18 +39,40 @@
 module {
 
   //
-  // Helper method to print values array. The transfer actually
+  // Helper method to print values and indices arrays. The transfer actually
   // reads more than required to verify size of buffer as well.
   //
-  func @dump(%arg0: memref<?xf64>) {
+  func @dumpf64(%arg0: memref<?xf64>) {
     %c = arith.constant 0 : index
     %d = arith.constant -1.0 : f64
     %0 = vector.transfer_read %arg0[%c], %d: memref<?xf64>, vector<8xf64>
     vector.print %0 : vector<8xf64>
     return
   }
+  func @dumpi08(%arg0: memref<?xi8>) {
+    %c = arith.constant 0 : index
+    %d = arith.constant -1 : i8
+    %0 = vector.transfer_read %arg0[%c], %d: memref<?xi8>, vector<8xi8>
+    vector.print %0 : vector<8xi8>
+    return
+  }
+  func @dumpi32(%arg0: memref<?xi32>) {
+    %c = arith.constant 0 : index
+    %d = arith.constant -1 : i32
+    %0 = vector.transfer_read %arg0[%c], %d: memref<?xi32>, vector<8xi32>
+    vector.print %0 : vector<8xi32>
+    return
+  }
+  func @dumpi64(%arg0: memref<?xi64>) {
+    %c = arith.constant 0 : index
+    %d = arith.constant -1 : i64
+    %0 = vector.transfer_read %arg0[%c], %d: memref<?xi64>, vector<8xi64>
+    vector.print %0 : vector<8xi64>
+    return
+  }
 
   func @entry() {
+    %c1 = arith.constant 1 : index
     %t1 = arith.constant sparse<
       [ [0,0], [0,1], [0,63], [1,0], [1,1], [31,0], [31,63] ],
         [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0 ]> : tensor<32x64xf64>
@@ -52,33 +81,66 @@ module {
     // Dense to sparse.
     %1 = sparse_tensor.convert %t1 : tensor<32x64xf64> to tensor<32x64xf64, #DCSR>
     %2 = sparse_tensor.convert %t1 : tensor<32x64xf64> to tensor<32x64xf64, #DCSC>
+    %3 = sparse_tensor.convert %t1 : tensor<32x64xf64> to tensor<32x64xf64, #CSC>
 
     // Sparse to sparse.
-    %3 = sparse_tensor.convert %1 : tensor<32x64xf64, #DCSR> to tensor<32x64xf64, #DCSC>
-    %4 = sparse_tensor.convert %2 : tensor<32x64xf64, #DCSC> to tensor<32x64xf64, #DCSR>
+    %4 = sparse_tensor.convert %1 : tensor<32x64xf64, #DCSR> to tensor<32x64xf64, #DCSC>
+    %5 = sparse_tensor.convert %2 : tensor<32x64xf64, #DCSC> to tensor<32x64xf64, #DCSR>
+    %6 = sparse_tensor.convert %3 : tensor<32x64xf64, #CSC> to tensor<32x64xf64, #DCSR>
 
     //
     // All proper row-/column-wise?
     //
-    // CHECK: ( 1, 2, 3, 4, 5, 6, 7, -1 )
-    // CHECK: ( 1, 4, 6, 2, 5, 3, 7, -1 )
-    // CHECK: ( 1, 4, 6, 2, 5, 3, 7, -1 )
-    // CHECK: ( 1, 2, 3, 4, 5, 6, 7, -1 )
+    // CHECK: ( 1, 2, 3, 4, 5, 6, 7, -1 )
+    // CHECK-NEXT: ( 1, 4, 6, 2, 5, 3, 7, -1 )
+    // CHECK-NEXT: ( 1, 4, 6, 2, 5, 3, 7, -1 )
+    // CHECK-NEXT: ( 1, 4, 6, 2, 5, 3, 7, -1 )
+    // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, -1 )
+    // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, -1 )
     //
     %m1 = sparse_tensor.values %1 : tensor<32x64xf64, #DCSR> to memref<?xf64>
     %m2 = sparse_tensor.values %2 : tensor<32x64xf64, #DCSC> to memref<?xf64>
-    %m3 = sparse_tensor.values %3 : tensor<32x64xf64, #DCSC> to memref<?xf64>
-    %m4 = sparse_tensor.values %4 : tensor<32x64xf64, #DCSR> to memref<?xf64>
-    call @dump(%m1) : (memref<?xf64>) -> ()
-    call @dump(%m2) : (memref<?xf64>) -> ()
-    call @dump(%m3) : (memref<?xf64>) -> ()
-    call @dump(%m4) : (memref<?xf64>) -> ()
+    %m3 = sparse_tensor.values %3 : tensor<32x64xf64, #CSC> to memref<?xf64>
+    %m4 = sparse_tensor.values %4 : tensor<32x64xf64, #DCSC> to memref<?xf64>
+    %m5 = sparse_tensor.values %5 : tensor<32x64xf64, #DCSR> to memref<?xf64>
+    %m6 = sparse_tensor.values %6 : tensor<32x64xf64, #DCSR> to memref<?xf64>
+    call @dumpf64(%m1) : (memref<?xf64>) -> ()
+    call @dumpf64(%m2) : (memref<?xf64>) -> ()
+    call @dumpf64(%m3) : (memref<?xf64>) -> ()
+    call @dumpf64(%m4) : (memref<?xf64>) -> ()
+    call @dumpf64(%m5) : (memref<?xf64>) -> ()
+    call @dumpf64(%m6) : (memref<?xf64>) -> ()
+
+    //
+    // Sanity check on indices.
+    //
+    // CHECK-NEXT: ( 0, 1, 63, 0, 1, 0, 63, -1 )
+    // CHECK-NEXT: ( 0, 1, 31, 0, 1, 0, 31, -1 )
+    // CHECK-NEXT: ( 0, 1, 31, 0, 1, 0, 31, -1 )
+    // CHECK-NEXT: ( 0, 1, 31, 0, 1, 0, 31, -1 )
+    // CHECK-NEXT: ( 0, 1, 63, 0, 1, 0, 63, -1 )
+    // CHECK-NEXT: ( 0, 1, 63, 0, 1, 0, 63, -1 )
+    //
+    %i1 = sparse_tensor.indices %1, %c1 : tensor<32x64xf64, #DCSR> to memref<?xi8>
+    %i2 = sparse_tensor.indices %2, %c1 : tensor<32x64xf64, #DCSC> to memref<?xi64>
+    %i3 = sparse_tensor.indices %3, %c1 : tensor<32x64xf64, #CSC> to memref<?xi32>
+    %i4 = sparse_tensor.indices %4, %c1 : tensor<32x64xf64, #DCSC> to memref<?xi64>
+    %i5 = sparse_tensor.indices %5, %c1 : tensor<32x64xf64, #DCSR> to memref<?xi8>
+    %i6 = sparse_tensor.indices %6, %c1 : tensor<32x64xf64, #DCSR> to memref<?xi8>
+    call @dumpi08(%i1) : (memref<?xi8>) -> ()
+    call @dumpi64(%i2) : (memref<?xi64>) -> ()
+    call @dumpi32(%i3) : (memref<?xi32>) -> ()
+    call @dumpi64(%i4) : (memref<?xi64>) -> ()
+    call @dumpi08(%i5) : (memref<?xi8>) -> ()
+    call @dumpi08(%i6) : (memref<?xi8>) -> ()
 
     // Release the resources.
     sparse_tensor.release %1 : tensor<32x64xf64, #DCSR>
     sparse_tensor.release %2 : tensor<32x64xf64, #DCSC>
-    sparse_tensor.release %3 : tensor<32x64xf64, #DCSC>
-    sparse_tensor.release %4 : tensor<32x64xf64, #DCSR>
+    sparse_tensor.release %3 : tensor<32x64xf64, #CSC>
+    sparse_tensor.release %4 : tensor<32x64xf64, #DCSC>
+    sparse_tensor.release %5 : tensor<32x64xf64, #DCSR>
+    sparse_tensor.release %6 : tensor<32x64xf64, #DCSR>
 
     return
   }