241 changes: 0 additions & 241 deletions mlir/test/Dialect/MemRef/canonicalize.mlir
@@ -1,99 +1,5 @@
// RUN: mlir-opt %s -canonicalize --split-input-file -allow-unregistered-dialect | FileCheck %s

// Test case: Basic folding of memref.tensor_load(memref.buffer_cast(t)) -> t
// CHECK-LABEL: func @tensor_load_of_buffer_cast(
// CHECK-SAME: %[[TENSOR:.*]]: tensor<?xf32>) -> tensor<?xf32> {
// CHECK: return %[[TENSOR]]
func @tensor_load_of_buffer_cast(%arg0: tensor<?xf32>) -> tensor<?xf32> {
%0 = memref.buffer_cast %arg0 : memref<?xf32>
%1 = memref.tensor_load %0 : memref<?xf32>
return %1 : tensor<?xf32>
}

// -----

// Test case: Basic folding of memref.buffer_cast(memref.tensor_load(m)) -> m
// CHECK-LABEL: func @buffer_cast_of_tensor_load(
// CHECK-SAME: %[[MEMREF:.*]]: memref<?xf32>) -> memref<?xf32> {
// CHECK: return %[[MEMREF]]
func @buffer_cast_of_tensor_load(%arg0: memref<?xf32>) -> memref<?xf32> {
%0 = memref.tensor_load %arg0 : memref<?xf32>
%1 = memref.buffer_cast %0 : memref<?xf32>
return %1 : memref<?xf32>
}

// -----

// Test case: If the memrefs are not the same type, don't fold them.
// Test case: If the memrefs are not cast-compatible (e.g. different address space),
// don't canonicalize them either.
// CHECK-LABEL: func @no_fold_buffer_cast_of_tensor_load(
// CHECK-SAME: %[[MEMREF_ADDRSPACE2:.*]]: memref<?xf32, 2>)
// CHECK-SAME: -> memref<?xf32, 7> {
// CHECK: %[[TENSOR:.*]] = memref.tensor_load
// CHECK-SAME: %[[MEMREF_ADDRSPACE2]] : memref<?xf32, 2>
// CHECK: %[[MEMREF_ADDRSPACE7:.*]] = memref.buffer_cast
// CHECK-SAME: %[[TENSOR]] : memref<?xf32, 7>
// CHECK: return %[[MEMREF_ADDRSPACE7]]
func @no_fold_buffer_cast_of_tensor_load(%arg0: memref<?xf32, 2>) -> memref<?xf32, 7> {
%0 = memref.tensor_load %arg0 : memref<?xf32, 2>
%1 = memref.buffer_cast %0 : memref<?xf32, 7>
return %1 : memref<?xf32, 7>
}

// -----

// CHECK-DAG: #[[$OFF_3:[a-z0-9]+]] = affine_map<(d0) -> (d0 + 3)>
// CHECK-DAG: #[[$OFF_UNK:[a-z0-9]+]] = affine_map<(d0)[s0] -> (d0 + s0)>

// Test case: If the memrefs are definitely cast-compatible, canonicalize to
// cast.
// CHECK-LABEL: func @canonicalize_buffer_cast_of_tensor_load(
// CHECK-SAME: %[[M:.*]]: memref<?xf32, #[[$OFF_3]]>)
// CHECK-SAME: -> memref<?xf32, #[[$OFF_UNK]]> {
// CHECK-NOT: memref.tensor_load
// CHECK-NOT: memref.buffer_cast
// CHECK: %[[R:.*]] = memref.cast %[[M]]
// CHECK-SAME: memref<?xf32, #[[$OFF_3]]> to memref<?xf32, #[[$OFF_UNK]]>
// CHECK: return %[[R]]
func @canonicalize_buffer_cast_of_tensor_load(
%arg0: memref<?xf32, offset: 3, strides: [1]>)
-> memref<?xf32, offset: ?, strides: [1]>
{
%0 = memref.tensor_load %arg0 : memref<?xf32, offset: 3, strides: [1]>
%1 = memref.buffer_cast %0 : memref<?xf32, offset: ?, strides: [1]>
return %1 : memref<?xf32, offset: ?, strides: [1]>
}

// -----

// CHECK-DAG: #[[$OFF_UNK:[a-z0-9]+]] = affine_map<(d0)[s0] -> (d0 + s0)>
// CHECK-DAG: #[[$OFF_3:[a-z0-9]+]] = affine_map<(d0) -> (d0 + 3)>

// Test case: If the memrefs are potentially cast-compatible, canonicalize to
// copy.
// CHECK-LABEL: func @canonicalize_buffer_cast_of_tensor_load_to_copy(
// CHECK-SAME: %[[M:.*]]: memref<?xf32, #[[$OFF_UNK]]>)
// CHECK-SAME: -> memref<?xf32, #[[$OFF_3]]> {
// CHECK-NOT: memref.tensor_load
// CHECK-NOT: memref.buffer_cast
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = memref.dim %[[M]], %[[C0]] : memref<?xf32, #[[$OFF_UNK]]>
// CHECK: %[[ALLOC:.*]] = memref.alloc(%[[DIM]]) : memref<?xf32, #[[$OFF_3]]>
// CHECK: memref.copy %[[M]], %[[ALLOC]]
// CHECK-SAME: memref<?xf32, #[[$OFF_UNK]]> to memref<?xf32, #[[$OFF_3]]>
// CHECK: return %[[ALLOC]]
func @canonicalize_buffer_cast_of_tensor_load_to_copy(
%arg0: memref<?xf32, offset: ?, strides: [1]>)
-> memref<?xf32, offset: 3, strides: [1]>
{
%0 = memref.tensor_load %arg0 : memref<?xf32, offset: ?, strides: [1]>
%1 = memref.buffer_cast %0 : memref<?xf32, offset: 3, strides: [1]>
return %1 : memref<?xf32, offset: 3, strides: [1]>
}

// -----

// CHECK-LABEL: func @subview_of_memcast
// CHECK-SAME: %[[ARG0:.[a-z0-9A-Z_]+]]: memref<4x6x16x32xi8>
// CHECK: %[[S:.+]] = memref.subview %arg0[0, 1, 0] [1, 1, 16] [1, 1, 1] : memref<4x6x16x32xi8> to memref<16x32xi8, #{{.*}}>
@@ -216,107 +122,6 @@ func @multiple_reducing_dims_all_dynamic(%arg0 : memref<?x?x?xf32, offset: ?, st
// CHECK-SAME: : memref<1x?xf32, #[[MAP1]]> to memref<?xf32, #[[MAP0]]>


// -----

// CHECK-LABEL: @clone_before_dealloc
// CHECK-SAME: %[[ARG:.*]]: memref<?xf32>
func @clone_before_dealloc(%arg0: memref<?xf32>) -> memref<?xf32> {
// CHECK-NEXT: return %[[ARG]]
%0 = memref.clone %arg0 : memref<?xf32> to memref<?xf32>
memref.dealloc %arg0 : memref<?xf32>
return %0 : memref<?xf32>
}

// -----

// CHECK-LABEL: @clone_before_dealloc
// CHECK-SAME: %[[ARG:.*]]: memref<?xf32>
func @clone_before_dealloc(%arg0: memref<?xf32>) -> memref<?xf32> {
// CHECK-NEXT: "use"(%arg0)
// CHECK-NEXT: return %[[ARG]]
%0 = memref.clone %arg0 : memref<?xf32> to memref<?xf32>
"use"(%0) : (memref<?xf32>) -> ()
memref.dealloc %0 : memref<?xf32>
return %arg0 : memref<?xf32>
}

// -----

// CHECK-LABEL: @clone_after_cast
// CHECK-SAME: %[[ARG:.*]]: memref<?xf32>
func @clone_after_cast(%arg0: memref<?xf32>) -> memref<32xf32> {
// CHECK-NEXT: memref.clone %[[ARG]] : memref<?xf32> to memref<32xf32>
// CHECK-NOT: memref.cast
%0 = memref.cast %arg0 : memref<?xf32> to memref<32xf32>
%1 = memref.clone %0 : memref<32xf32> to memref<32xf32>
return %1 : memref<32xf32>
}

// -----

// CHECK-LABEL: @clone_and_cast
// CHECK-SAME: %[[ARG:.*]]: memref<?xf32>
func @clone_and_cast(%arg0: memref<?xf32>) -> memref<32xf32> {
// CHECK-NEXT: %[[RES:.*]] = memref.cast %[[ARG]] : memref<?xf32> to memref<32xf32>
%0 = memref.clone %arg0 : memref<?xf32> to memref<32xf32>
// CHECK-NEXT: return %[[RES]]
memref.dealloc %arg0 : memref<?xf32>
return %0 : memref<32xf32>
}

// -----

// CHECK-LABEL: @alias_is_freed
func @alias_is_freed(%arg0 : memref<?xf32>) {
// CHECK: memref.clone
// CHECK: memref.dealloc
// CHECK: memref.dealloc
%0 = memref.cast %arg0 : memref<?xf32> to memref<32xf32>
%1 = memref.clone %0 : memref<32xf32> to memref<32xf32>
memref.dealloc %arg0 : memref<?xf32>
"use"(%1) : (memref<32xf32>) -> ()
memref.dealloc %1 : memref<32xf32>
return
}

// -----

// Verify SimplifyClones skips clones with multiple deallocations.
// CHECK-LABEL: @clone_multiple_dealloc_of_source
// CHECK-SAME: %[[ARG:.*]]: memref<?xf32>
func @clone_multiple_dealloc_of_source(%arg0: memref<?xf32>) -> memref<?xf32> {
// CHECK-NEXT: %[[RES:.*]] = memref.clone %[[ARG]]
// CHECK: memref.dealloc %[[ARG]]
// CHECK: memref.dealloc %[[ARG]]
// CHECK: return %[[RES]]
%0 = memref.clone %arg0 : memref<?xf32> to memref<?xf32>
"if_else"() ({
memref.dealloc %arg0 : memref<?xf32>
}, {
memref.dealloc %arg0 : memref<?xf32>
}) : () -> ()
return %0 : memref<?xf32>
}

// -----

// CHECK-LABEL: @clone_multiple_dealloc_of_clone
// CHECK-SAME: %[[ARG:.*]]: memref<?xf32>
func @clone_multiple_dealloc_of_clone(%arg0: memref<?xf32>) -> memref<?xf32> {
// CHECK-NEXT: %[[CLONE:.*]] = memref.clone %[[ARG]]
// CHECK: memref.dealloc %[[CLONE]]
// CHECK: memref.dealloc %[[CLONE]]
// CHECK: return %[[ARG]]
%0 = memref.clone %arg0 : memref<?xf32> to memref<?xf32>
"use"(%0) : (memref<?xf32>) -> ()
"if_else"() ({
memref.dealloc %0 : memref<?xf32>
}, {
memref.dealloc %0 : memref<?xf32>
}) : () -> ()
return %arg0 : memref<?xf32>
}

// -----

// CHECK-LABEL: func @dim_of_sized_view
@@ -343,38 +148,6 @@ func @no_fold_of_store(%arg : memref<32xi8>, %holder: memref<memref<?xi8>>) {

// -----

// Test case: Folding of memref.load(memref.buffer_cast(%v), %idxs)
// -> tensor.extract(%v, %idxs)
// CHECK-LABEL: func @load_from_buffer_cast(
// CHECK-SAME: %[[IDX0:[0-9a-z]+]]: index, %[[IDX1:[0-9a-z]+]]: index
// CHECK-SAME: %[[TENSOR:[0-9a-z]+]]: tensor<?x?xf32>
// CHECK: %[[RES:.*]] = tensor.extract %[[TENSOR]][%[[IDX0]], %[[IDX1]]]
// CHECK-NOT: memref.load
// CHECK: return %[[RES]] : f32
func @load_from_buffer_cast(%arg0: index, %arg1: index, %arg2: tensor<?x?xf32>) -> f32 {
%0 = memref.buffer_cast %arg2 : memref<?x?xf32>
%1 = memref.load %0[%arg0, %arg1] : memref<?x?xf32>
return %1 : f32
}

// -----


// Test case: Basic folding of tensor.dim(memref.tensor_load(m)) -> memref.dim(m).
// CHECK-LABEL: func @dim_of_tensor_load(
// CHECK-SAME: %[[MEMREF:[0-9a-z]*]]: memref<?xf32>
// CHECK: %[[C0:.*]] = arith.constant 0
// CHECK: %[[D:.*]] = memref.dim %[[MEMREF]], %[[C0]]
// CHECK: return %[[D]] : index
func @dim_of_tensor_load(%arg0: memref<?xf32>) -> index {
%c0 = arith.constant 0 : index
%0 = memref.tensor_load %arg0 : memref<?xf32>
%1 = tensor.dim %0, %c0 : tensor<?xf32>
return %1 : index
}

// -----

// Test case: Folding of memref.dim(memref.alloca(%size), %idx) -> %size
// CHECK-LABEL: func @dim_of_alloca(
// CHECK-SAME: %[[SIZE:[0-9a-z]+]]: index
@@ -445,20 +218,6 @@ func @dim_of_memref_reshape_i32(%arg0: memref<*xf32>, %arg1: memref<?xi32>)

// -----

// CHECK-LABEL: func @tensor_cast_to_memref
// CHECK-SAME: %[[ARG0:.+]]: tensor<4x6x16x32xi8>
// CHECK: %[[M:.+]] = memref.buffer_cast %[[ARG0]] : memref<4x6x16x32xi8>
// CHECK: %[[M1:.+]] = memref.cast %[[M]] : memref<4x6x16x32xi8> to memref<?x?x16x32xi8>
// CHECK: return %[[M1]] : memref<?x?x16x32xi8>
func @tensor_cast_to_memref(%arg0 : tensor<4x6x16x32xi8>) ->
memref<?x?x16x32xi8> {
%0 = tensor.cast %arg0 : tensor<4x6x16x32xi8> to tensor<?x?x16x32xi8>
%1 = memref.buffer_cast %0 : memref<?x?x16x32xi8>
return %1 : memref<?x?x16x32xi8>
}

// -----

// CHECK-LABEL: func @alloc_const_fold
func @alloc_const_fold() -> memref<?xf32> {
// CHECK-NEXT: %0 = memref.alloc() : memref<4xf32>
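Most of the tests deleted above exercised memref.tensor_load and memref.buffer_cast. Judging from the renames visible in the rendered hunks later in this diff (memref.buffer_cast -> bufferization.to_memref, memref.tensor_load -> bufferization.to_tensor), the basic round-trip fold presumably carries over under the new names. A minimal sketch of what the migrated first test could look like; the function name and CHECK lines are illustrative, not taken from this patch:

// Hypothetical migrated form of @tensor_load_of_buffer_cast (sketch only).
// CHECK-LABEL: func @to_tensor_of_to_memref(
// CHECK-SAME: %[[TENSOR:.*]]: tensor<?xf32>) -> tensor<?xf32> {
// CHECK: return %[[TENSOR]]
func @to_tensor_of_to_memref(%arg0: tensor<?xf32>) -> tensor<?xf32> {
  // to_memref immediately followed by to_tensor of the same value folds away.
  %0 = bufferization.to_memref %arg0 : memref<?xf32>
  %1 = bufferization.to_tensor %0 : memref<?xf32>
  return %1 : tensor<?xf32>
}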
16 changes: 0 additions & 16 deletions mlir/test/Dialect/MemRef/ops.mlir
@@ -6,13 +6,6 @@
// CHECK-DAG: #[[$strided2DOFF0:.*]] = affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>
// CHECK-DAG: #[[$strided3DOFF0:.*]] = affine_map<(d0, d1, d2)[s0, s1] -> (d0 * s0 + d1 * s1 + d2)>

// CHECK-LABEL: test_buffer_cast
func @test_buffer_cast(%arg0: tensor<?xi64>, %arg1: tensor<*xi64>) -> (memref<?xi64, affine_map<(d0) -> (d0 + 7)>>, memref<*xi64, 1>) {
%0 = memref.buffer_cast %arg0 : memref<?xi64, affine_map<(d0) -> (d0 + 7)>>
%1 = memref.buffer_cast %arg1 : memref<*xi64, 1>
return %0, %1 : memref<?xi64, affine_map<(d0) -> (d0 + 7)>>, memref<*xi64, 1>
}

// CHECK-LABEL: func @memref_reinterpret_cast
func @memref_reinterpret_cast(%in: memref<?xf32>)
-> memref<10x?xf32, offset: ?, strides: [?, 1]> {
@@ -62,15 +55,6 @@ func @write_global_memref() {
// CHECK-LABEL: func @read_global_memref
func @read_global_memref() {
%0 = memref.get_global @memref0 : memref<2xf32>
%1 = memref.tensor_load %0 : memref<2xf32>
return
}

// CHECK-LABEL: func @memref_clone
func @memref_clone() {
%0 = memref.alloc() : memref<2xf32>
%1 = memref.cast %0 : memref<2xf32> to memref<*xf32>
%2 = memref.clone %1 : memref<*xf32> to memref<*xf32>
return
}

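The two tests removed from ops.mlir covered the round-trip syntax of memref.buffer_cast and memref.clone. Assuming to_memref keeps buffer_cast's type syntax (as the bufferize hunks below suggest), the first of them would presumably read roughly as follows; the function name is invented for illustration, and this diff does not show where memref.clone ends up:

// Hypothetical renamed form of @test_buffer_cast (sketch only).
// CHECK-LABEL: test_to_memref
func @test_to_memref(%arg0: tensor<?xi64>, %arg1: tensor<*xi64>) -> (memref<?xi64, affine_map<(d0) -> (d0 + 7)>>, memref<*xi64, 1>) {
  // A ranked memref with an offset layout, and an unranked memref in address space 1.
  %0 = bufferization.to_memref %arg0 : memref<?xi64, affine_map<(d0) -> (d0 + 7)>>
  %1 = bufferization.to_memref %arg1 : memref<*xi64, 1>
  return %0, %1 : memref<?xi64, affine_map<(d0) -> (d0 + 7)>>, memref<*xi64, 1>
}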
22 changes: 11 additions & 11 deletions mlir/test/Dialect/SCF/bufferize.mlir
@@ -4,14 +4,14 @@
// CHECK-SAME: %[[PRED:.*]]: i1,
// CHECK-SAME: %[[TRUE_TENSOR:.*]]: tensor<?xf32>,
// CHECK-SAME: %[[FALSE_TENSOR:.*]]: tensor<?xf32>) -> tensor<?xf32> {
// CHECK: %[[TRUE_MEMREF:.*]] = memref.buffer_cast %[[TRUE_TENSOR]] : memref<?xf32>
// CHECK: %[[FALSE_MEMREF:.*]] = memref.buffer_cast %[[FALSE_TENSOR]] : memref<?xf32>
// CHECK: %[[TRUE_MEMREF:.*]] = bufferization.to_memref %[[TRUE_TENSOR]] : memref<?xf32>
// CHECK: %[[FALSE_MEMREF:.*]] = bufferization.to_memref %[[FALSE_TENSOR]] : memref<?xf32>
// CHECK: %[[RESULT_MEMREF:.*]] = scf.if %[[PRED]] -> (memref<?xf32>) {
// CHECK: scf.yield %[[TRUE_MEMREF]] : memref<?xf32>
// CHECK: } else {
// CHECK: scf.yield %[[FALSE_MEMREF]] : memref<?xf32>
// CHECK: }
// CHECK: %[[RESULT_TENSOR:.*]] = memref.tensor_load %[[RESULT_MEMREF:.*]] : memref<?xf32>
// CHECK: %[[RESULT_TENSOR:.*]] = bufferization.to_tensor %[[RESULT_MEMREF:.*]] : memref<?xf32>
// CHECK: return %[[RESULT_TENSOR]] : tensor<?xf32>
// CHECK: }
func @if(%pred: i1, %true_val: tensor<?xf32>, %false_val: tensor<?xf32>) -> tensor<?xf32> {
@@ -27,11 +27,11 @@ func @if(%pred: i1, %true_val: tensor<?xf32>, %false_val: tensor<?xf32>) -> tens
// CHECK-SAME: %[[TENSOR:.*]]: tensor<f32>,
// CHECK-SAME: %[[LB:.*]]: index, %[[UB:.*]]: index,
// CHECK-SAME: %[[STEP:.*]]: index) -> tensor<f32> {
// CHECK: %[[MEMREF:.*]] = memref.buffer_cast %[[TENSOR]] : memref<f32>
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<f32>
// CHECK: %[[RESULT_MEMREF:.*]] = scf.for %[[VAL_6:.*]] = %[[LB]] to %[[UB]] step %[[STEP]] iter_args(%[[ITER:.*]] = %[[MEMREF]]) -> (memref<f32>) {
// CHECK: scf.yield %[[ITER]] : memref<f32>
// CHECK: }
// CHECK: %[[VAL_8:.*]] = memref.tensor_load %[[VAL_9:.*]] : memref<f32>
// CHECK: %[[VAL_8:.*]] = bufferization.to_tensor %[[VAL_9:.*]] : memref<f32>
// CHECK: return %[[VAL_8]] : tensor<f32>
// CHECK: }
func @for(%arg0: tensor<f32>, %lb: index, %ub: index, %step: index) -> tensor<f32> {
@@ -60,14 +60,14 @@ func @if_correct_recursive_legalization_behavior(%pred: i1, %tensor: tensor<f32>
// CHECK-LABEL: func @for_correct_recursive_legalization_behavior(
// CHECK-SAME: %[[TENSOR:.*]]: tensor<f32>,
// CHECK-SAME: %[[INDEX:.*]]: index) -> tensor<f32> {
// CHECK: %[[MEMREF:.*]] = memref.buffer_cast %[[TENSOR]] : memref<f32>
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<f32>
// CHECK: %[[RESULT:.*]] = scf.for %[[IV:.*]] = %[[INDEX]] to %[[INDEX]] step %[[INDEX]] iter_args(%[[MEMREF_ITER:.*]] = %[[MEMREF]]) -> (memref<f32>) {
// CHECK: %[[TENSOR_ITER:.*]] = memref.tensor_load %[[MEMREF_ITER]] : memref<f32>
// CHECK: %[[TENSOR_ITER:.*]] = bufferization.to_tensor %[[MEMREF_ITER]] : memref<f32>
// CHECK: %[[TENSOR_MUNGED:.*]] = "test.munge_tensor"(%[[TENSOR_ITER]]) : (tensor<f32>) -> tensor<f32>
// CHECK: %[[MEMREF_MUNGED:.*]] = memref.buffer_cast %[[TENSOR_MUNGED]] : memref<f32>
// CHECK: %[[MEMREF_MUNGED:.*]] = bufferization.to_memref %[[TENSOR_MUNGED]] : memref<f32>
// CHECK: scf.yield %[[MEMREF_MUNGED]] : memref<f32>
// CHECK: }
// CHECK: %[[TENSOR:.*]] = memref.tensor_load %[[RESULT:.*]] : memref<f32>
// CHECK: %[[TENSOR:.*]] = bufferization.to_tensor %[[RESULT:.*]] : memref<f32>
// CHECK: return %[[TENSOR]] : tensor<f32>
// CHECK: }
func @for_correct_recursive_legalization_behavior(%arg0: tensor<f32>, %index: index) -> tensor<f32> {
@@ -80,12 +80,12 @@ func @for_correct_recursive_legalization_behavior(%arg0: tensor<f32>, %index: in

// CHECK-LABEL: func @bufferize_while(
// CHECK-SAME: %[[ARG0:.*]]: i64, %[[ARG1:.*]]: i64, %[[ARG2:.*]]: tensor<f32>
// CHECK: %[[M:.*]] = memref.buffer_cast %[[ARG2]] : memref<f32>
// CHECK: %[[M:.*]] = bufferization.to_memref %[[ARG2]] : memref<f32>
// CHECK: %[[RES1:.*]]:3 = scf.while (%{{.*}} = %[[ARG0]], %{{.*}} = %[[M]]) : (i64, memref<f32>) -> (i64, i64, memref<f32>)
// CHECK: scf.condition(%{{.*}}) %{{.*}}, %{{.*}}, %{{.*}} : i64, i64, memref<f32>
// CHECK: ^bb0(%{{.*}}: i64, %{{.*}}: i64, %{{.*}}: memref<f32>):
// CHECK: scf.yield %{{.*}}, %{{.*}} : i64, memref<f32>
// CHECK: %[[RES2:.*]] = memref.tensor_load %[[RES1]]#2 : memref<f32>
// CHECK: %[[RES2:.*]] = bufferization.to_tensor %[[RES1]]#2 : memref<f32>
// CHECK: return %[[RES1]]#1, %[[RES2]] : i64, tensor<f32>
func @bufferize_while(%arg0: i64, %arg1: i64, %arg2: tensor<f32>) -> (i64, tensor<f32>) {
%c2_i64 = arith.constant 2 : i64
16 changes: 8 additions & 8 deletions mlir/test/Dialect/SCF/canonicalize.mlir
@@ -564,12 +564,12 @@ func @last_value(%t0: tensor<128x128xf32>, %t1: tensor<128x128xf32>,
%lb : index, %ub : index, %step : index)
-> (tensor<128x128xf32>, tensor<128x128xf32>, tensor<128x128xf32>)
{
// CHECK-NEXT: %[[M1:.*]] = memref.buffer_cast %[[T1]] : memref<128x128xf32>
// CHECK-NEXT: %[[M1:.*]] = bufferization.to_memref %[[T1]] : memref<128x128xf32>
// CHECK-NEXT: %[[FOR_RES:.*]] = scf.for {{.*}} iter_args(%[[BBARG_T2:.*]] = %[[T2]]) -> (tensor<128x128xf32>) {
%0:3 = scf.for %arg0 = %lb to %ub step %step iter_args(%arg1 = %t0, %arg2 = %t1, %arg3 = %t2)
-> (tensor<128x128xf32>, tensor<128x128xf32>, tensor<128x128xf32>)
{
%m1 = memref.buffer_cast %arg2 : memref<128x128xf32>
%m1 = bufferization.to_memref %arg2 : memref<128x128xf32>

// CHECK-NEXT: call @process(%[[M0]]) : (memref<128x128xf32>) -> ()
call @process(%m0) : (memref<128x128xf32>) -> ()
@@ -579,22 +579,22 @@

// This does not hoist (fails the "bbArg has at most a single use" check).
// CHECK-NEXT: %[[T:.*]] = call @process_tensor(%[[BBARG_T2]]) : (tensor<128x128xf32>) -> memref<128x128xf32>
// CHECK-NEXT: %[[YIELD_T:.*]] = memref.tensor_load %[[T:.*]]
// CHECK-NEXT: %[[YIELD_T:.*]] = bufferization.to_tensor %[[T:.*]]
%m2 = call @process_tensor(%arg3): (tensor<128x128xf32>) -> memref<128x128xf32>
%3 = memref.tensor_load %m2 : memref<128x128xf32>
%3 = bufferization.to_tensor %m2 : memref<128x128xf32>

// All this stuff goes away, incrementally
%1 = memref.tensor_load %m0 : memref<128x128xf32>
%2 = memref.tensor_load %m1 : memref<128x128xf32>
%1 = bufferization.to_tensor %m0 : memref<128x128xf32>
%2 = bufferization.to_tensor %m1 : memref<128x128xf32>

// CHECK-NEXT: scf.yield %[[YIELD_T]] : tensor<128x128xf32>
scf.yield %1, %2, %3 : tensor<128x128xf32>, tensor<128x128xf32>, tensor<128x128xf32>

// CHECK-NEXT: }
}

// CHECK-NEXT: %[[R0:.*]] = memref.tensor_load %[[M0]] : memref<128x128xf32>
// CHECK-NEXT: %[[R1:.*]] = memref.tensor_load %[[M1]] : memref<128x128xf32>
// CHECK-NEXT: %[[R0:.*]] = bufferization.to_tensor %[[M0]] : memref<128x128xf32>
// CHECK-NEXT: %[[R1:.*]] = bufferization.to_tensor %[[M1]] : memref<128x128xf32>
// CHECK-NEXT: return %[[R0]], %[[R1]], %[[FOR_RES]] : tensor<128x128xf32>, tensor<128x128xf32>, tensor<128x128xf32>
return %0#0, %0#1, %0#2 : tensor<128x128xf32>, tensor<128x128xf32>, tensor<128x128xf32>
}
4 changes: 2 additions & 2 deletions mlir/test/Dialect/Shape/bufferize.mlir
@@ -6,10 +6,10 @@
// CHECK: %[[WTRUE:.*]] = shape.const_witness true
// CHECK: %[[MEMREF:.*]] = shape.assuming %[[WTRUE]] -> (memref<2xf16>) {
// CHECK: %[[TENSOR_VAL:.*]] = "test.source"() : () -> tensor<2xf16>
// CHECK: %[[YIELDED_MEMREF:.*]] = memref.buffer_cast %[[TENSOR_VAL]] : memref<2xf16>
// CHECK: %[[YIELDED_MEMREF:.*]] = bufferization.to_memref %[[TENSOR_VAL]] : memref<2xf16>
// CHECK: shape.assuming_yield %[[YIELDED_MEMREF]] : memref<2xf16>
// CHECK: }
// CHECK: %[[TENSOR:.*]] = memref.tensor_load %[[MEMREF:.*]] : memref<2xf16>
// CHECK: %[[TENSOR:.*]] = bufferization.to_tensor %[[MEMREF:.*]] : memref<2xf16>
// CHECK: "test.sink"(%[[TENSOR]]) : (tensor<2xf16>) -> ()
// CHECK: return
// CHECK: }
14 changes: 7 additions & 7 deletions mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir
@@ -46,7 +46,7 @@
// CHECK: memref.store %[[ElemVal]], %[[M]][%[[Iv0]]] : memref<13xi32>
// CHECK: scf.yield
// CHECK: }
// CHECK: %[[T:.*]] = memref.tensor_load %[[M]] : memref<13xi32>
// CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<13xi32>
// CHECK: return %[[T]] : tensor<13xi32>
func @sparse_convert_1d(%arg0: tensor<13xi32, #SparseVector>) -> tensor<13xi32> {
%0 = sparse_tensor.convert %arg0 : tensor<13xi32, #SparseVector> to tensor<13xi32>
@@ -86,7 +86,7 @@ func @sparse_convert_1d(%arg0: tensor<13xi32, #SparseVector>) -> tensor<13xi32>
// CHECK: memref.store %[[ElemVal]], %[[M]][%[[Iv0]]] : memref<?xi32>
// CHECK: scf.yield
// CHECK: }
// CHECK: %[[T:.*]] = memref.tensor_load %[[M]] : memref<?xi32>
// CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<?xi32>
// CHECK: return %[[T]] : tensor<?xi32>
func @sparse_convert_1d_dyn(%arg0: tensor<?xi32, #SparseVector>) -> tensor<?xi32> {
%0 = sparse_tensor.convert %arg0 : tensor<?xi32, #SparseVector> to tensor<?xi32>
@@ -130,7 +130,7 @@ func @sparse_convert_1d_dyn(%arg0: tensor<?xi32, #SparseVector>) -> tensor<?xi32
// CHECK: memref.store %[[ElemVal]], %[[M]][%[[Iv0]], %[[Iv1]]] : memref<2x4xf64>
// CHECK: scf.yield
// CHECK: }
// CHECK: %[[T:.*]] = memref.tensor_load %[[M]] : memref<2x4xf64>
// CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<2x4xf64>
// CHECK: return %[[T]] : tensor<2x4xf64>
func @sparse_convert_2d(%arg0: tensor<2x4xf64, #SparseMatrix>) -> tensor<2x4xf64> {
%0 = sparse_tensor.convert %arg0 : tensor<2x4xf64, #SparseMatrix> to tensor<2x4xf64>
@@ -174,7 +174,7 @@ func @sparse_convert_2d(%arg0: tensor<2x4xf64, #SparseMatrix>) -> tensor<2x4xf64
// CHECK: memref.store %[[ElemVal]], %[[M]][%[[Iv0]], %[[Iv1]]] : memref<?x4xf64>
// CHECK: scf.yield
// CHECK: }
// CHECK: %[[T:.*]] = memref.tensor_load %[[M]] : memref<?x4xf64>
// CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<?x4xf64>
// CHECK: return %[[T]] : tensor<?x4xf64>
func @sparse_convert_2d_dyn0(%arg0: tensor<?x4xf64, #SparseMatrix>) -> tensor<?x4xf64> {
%0 = sparse_tensor.convert %arg0 : tensor<?x4xf64, #SparseMatrix> to tensor<?x4xf64>
@@ -218,7 +218,7 @@ func @sparse_convert_2d_dyn0(%arg0: tensor<?x4xf64, #SparseMatrix>) -> tensor<?x
// CHECK: memref.store %[[ElemVal]], %[[M]][%[[Iv0]], %[[Iv1]]] : memref<2x?xf64>
// CHECK: scf.yield
// CHECK: }
// CHECK: %[[T:.*]] = memref.tensor_load %[[M]] : memref<2x?xf64>
// CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<2x?xf64>
// CHECK: return %[[T]] : tensor<2x?xf64>
func @sparse_convert_2d_dyn1(%arg0: tensor<2x?xf64, #SparseMatrix>) -> tensor<2x?xf64> {
%0 = sparse_tensor.convert %arg0 : tensor<2x?xf64, #SparseMatrix> to tensor<2x?xf64>
@@ -262,7 +262,7 @@ func @sparse_convert_2d_dyn1(%arg0: tensor<2x?xf64, #SparseMatrix>) -> tensor<2x
// CHECK: memref.store %[[ElemVal]], %[[M]][%[[Iv0]], %[[Iv1]]] : memref<?x?xf64>
// CHECK: scf.yield
// CHECK: }
// CHECK: %[[T:.*]] = memref.tensor_load %[[M]] : memref<?x?xf64>
// CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<?x?xf64>
// CHECK: return %[[T]] : tensor<?x?xf64>
func @sparse_convert_2d_dyn2(%arg0: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?xf64> {
%0 = sparse_tensor.convert %arg0 : tensor<?x?xf64, #SparseMatrix> to tensor<?x?xf64>
@@ -311,7 +311,7 @@ func @sparse_convert_2d_dyn2(%arg0: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x
// CHECK: memref.store %[[ElemVal]], %[[M]][%[[Iv0]], %[[Iv1]], %[[Iv2]]] : memref<2x3x4xf64>
// CHECK: scf.yield
// CHECK: }
// CHECK: %[[T:.*]] = memref.tensor_load %[[M]] : memref<2x3x4xf64>
// CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<2x3x4xf64>
// CHECK: return %[[T]] : tensor<2x3x4xf64>
func @sparse_convert_3d(%arg0: tensor<2x3x4xf64, #SparseTensor>) -> tensor<2x3x4xf64> {
%0 = sparse_tensor.convert %arg0 : tensor<2x3x4xf64, #SparseTensor> to tensor<2x3x4xf64>
12 changes: 6 additions & 6 deletions mlir/test/Dialect/SparseTensor/dense.mlir
@@ -41,7 +41,7 @@
// CHECK: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> to memref<?xf32>
// CHECK: %[[VAL_8:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16xf32>
// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32>
// CHECK: %[[VAL_9:.*]] = memref.alloc() : memref<32x16xf32>
// CHECK: memref.copy %[[VAL_8]], %[[VAL_9]] : memref<32x16xf32> to memref<32x16xf32>
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
Expand All @@ -53,7 +53,7 @@
// CHECK: memref.store %[[VAL_15]], %[[VAL_9]]{{\[}}%[[VAL_10]], %[[VAL_11]]] : memref<32x16xf32>
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_16:.*]] = memref.tensor_load %[[VAL_9]] : memref<32x16xf32>
// CHECK: %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<32x16xf32>
// CHECK: return %[[VAL_16]] : tensor<32x16xf32>
// CHECK: }
func @dense1(%arga: tensor<32x16xf32, #DenseMatrix>,
@@ -84,7 +84,7 @@ func @dense1(%arga: tensor<32x16xf32, #DenseMatrix>,
// CHECK: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> to memref<?xf32>
// CHECK: %[[VAL_8:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16xf32>
// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32>
// CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
// CHECK: %[[VAL_11:.*]] = arith.muli %[[VAL_9]], %[[VAL_4]] : index
Expand All @@ -94,7 +94,7 @@ func @dense1(%arga: tensor<32x16xf32, #DenseMatrix>,
// CHECK: memref.store %[[VAL_14]], %[[VAL_8]]{{\[}}%[[VAL_9]], %[[VAL_10]]] : memref<32x16xf32>
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_15:.*]] = memref.tensor_load %[[VAL_8]] : memref<32x16xf32>
// CHECK: %[[VAL_15:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32x16xf32>
// CHECK: return %[[VAL_15]] : tensor<32x16xf32>
// CHECK: }
func @dense2(%arga: tensor<32x16xf32, #DenseMatrix>,
@@ -124,7 +124,7 @@ func @dense2(%arga: tensor<32x16xf32, #DenseMatrix>,
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 16 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_7:.*]] = memref.buffer_cast %[[VAL_0]] : memref<32x16xf32>
// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : memref<32x16xf32>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> to memref<?xf32>
// CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
@@ -168,7 +168,7 @@ func @dense3(%arga: tensor<32x16xf32>,
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 16 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_7:.*]] = memref.buffer_cast %[[VAL_0]] : memref<32x16x8xf32>
// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : memref<32x16x8xf32>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf32>
// CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
100 changes: 50 additions & 50 deletions mlir/test/Dialect/SparseTensor/sparse_1d.mlir

Large diffs are not rendered by default.

94 changes: 47 additions & 47 deletions mlir/test/Dialect/SparseTensor/sparse_2d.mlir

Large diffs are not rendered by default.

122 changes: 61 additions & 61 deletions mlir/test/Dialect/SparseTensor/sparse_3d.mlir

Large diffs are not rendered by default.
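The sparse_1d/2d/3d diffs above are collapsed, but judging from the rendered SparseTensor hunks below they presumably apply the same mechanical rename throughout. A representative before/after pair (illustrative only; the capture names are placeholders, not lines from the hidden diffs):

// Before (sketch):
//   %[[B:.*]] = memref.buffer_cast %[[T]] : memref<32xf64>
//   %[[R:.*]] = memref.tensor_load %[[M]] : memref<32xf64>
// After (sketch):
//   %[[B:.*]] = bufferization.to_memref %[[T]] : memref<32xf64>
//   %[[R:.*]] = bufferization.to_tensor %[[M]] : memref<32xf64>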

18 changes: 9 additions & 9 deletions mlir/test/Dialect/SparseTensor/sparse_affine.mlir
@@ -24,8 +24,8 @@
// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<4xf32>
// CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32>
// CHECK: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<4xf32>
// CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf32>
// CHECK: %[[VAL_11:.*]] = memref.alloc() : memref<32xf32>
// CHECK: memref.copy %[[VAL_10]], %[[VAL_11]] : memref<32xf32> to memref<32xf32>
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_4]]] : memref<4xf32>
@@ -39,7 +39,7 @@
// CHECK: %[[VAL_20:.*]] = arith.addf %[[VAL_17]], %[[VAL_19]] : f32
// CHECK: memref.store %[[VAL_20]], %[[VAL_11]]{{\[}}%[[VAL_16]]] : memref<32xf32>
// CHECK: }
// CHECK: %[[VAL_21:.*]] = memref.tensor_load %[[VAL_11]] : memref<32xf32>
// CHECK: %[[VAL_21:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xf32>
// CHECK: return %[[VAL_21]] : tensor<32xf32>
// CHECK: }
func @mul_inv_dense1d(%arga: tensor<32xf32, #SpVec>,
@@ -76,8 +76,8 @@ func @mul_inv_dense1d(%arga: tensor<32xf32, #SpVec>,
// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<34xi32>
// CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xi32>
// CHECK: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<34xi32>
// CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi32>
// CHECK: %[[VAL_11:.*]] = memref.alloc() : memref<32xi32>
// CHECK: memref.copy %[[VAL_10]], %[[VAL_11]] : memref<32xi32> to memref<32xi32>
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]]] : memref<?xindex>
Expand All @@ -90,7 +90,7 @@ func @mul_inv_dense1d(%arga: tensor<32xf32, #SpVec>,
// CHECK: %[[VAL_19:.*]] = arith.andi %[[VAL_16]], %[[VAL_18]] : i32
// CHECK: memref.store %[[VAL_19]], %[[VAL_11]]{{\[}}%[[VAL_15]]] : memref<32xi32>
// CHECK: }
// CHECK: %[[VAL_20:.*]] = memref.tensor_load %[[VAL_11]] : memref<32xi32>
// CHECK: %[[VAL_20:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xi32>
// CHECK: return %[[VAL_20]] : tensor<32xi32>
// CHECK: }
func @and_affine_dense1d(%arga: tensor<32xi32, #SpVec>,
@@ -128,8 +128,8 @@ func @and_affine_dense1d(%arga: tensor<32xi32, #SpVec>,
// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_1]] : memref<34x19xf64>
// CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf64>
// CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<34x19xf64>
// CHECK: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf64>
// CHECK: %[[VAL_13:.*]] = memref.alloc() : memref<32x16xf64>
// CHECK: memref.copy %[[VAL_12]], %[[VAL_13]] : memref<32x16xf64> to memref<32x16xf64>
// CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_3]] {
Expand All @@ -148,7 +148,7 @@ func @and_affine_dense1d(%arga: tensor<32xi32, #SpVec>,
// CHECK: memref.store %[[VAL_26]], %[[VAL_13]]{{\[}}%[[VAL_14]], %[[VAL_19]]] : memref<32x16xf64>
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_27:.*]] = memref.tensor_load %[[VAL_13]] : memref<32x16xf64>
// CHECK: %[[VAL_27:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<32x16xf64>
// CHECK: return %[[VAL_27]] : tensor<32x16xf64>
// CHECK: }
func @mul_affine_dense2d(%arga: tensor<32x16xf64, #CSR>,
38 changes: 19 additions & 19 deletions mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
@@ -39,7 +39,7 @@
// CHECK: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_2]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_2]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
// CHECK: %[[VAL_7:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] {
Expand All @@ -48,7 +48,7 @@
// CHECK: %[[VAL_13:.*]] = math.abs %[[VAL_12]] : f64
// CHECK: memref.store %[[VAL_13]], %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<32xf64>
// CHECK: }
// CHECK: %[[VAL_14:.*]] = memref.tensor_load %[[VAL_7]] : memref<32xf64>
// CHECK: %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
// CHECK: return %[[VAL_14]] : tensor<32xf64>
func @abs(%arga: tensor<32xf64, #SV>,
%argx: tensor<32xf64> {linalg.inplaceable = true}) -> tensor<32xf64> {
@@ -70,7 +70,7 @@ func @abs(%arga: tensor<32xf64, #SV>,
// CHECK: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_2]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_2]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
// CHECK: %[[VAL_7:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] {
Expand All @@ -79,7 +79,7 @@ func @abs(%arga: tensor<32xf64, #SV>,
// CHECK: %[[VAL_13:.*]] = math.ceil %[[VAL_12]] : f64
// CHECK: memref.store %[[VAL_13]], %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<32xf64>
// CHECK: }
// CHECK: %[[VAL_14:.*]] = memref.tensor_load %[[VAL_7]] : memref<32xf64>
// CHECK: %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
// CHECK: return %[[VAL_14]] : tensor<32xf64>
// CHECK: }
func @ceil(%arga: tensor<32xf64, #SV>,
@@ -102,7 +102,7 @@ func @ceil(%arga: tensor<32xf64, #SV>,
// CHECK: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_2]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_2]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
// CHECK: %[[VAL_7:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] {
Expand All @@ -111,7 +111,7 @@ func @ceil(%arga: tensor<32xf64, #SV>,
// CHECK: %[[VAL_13:.*]] = math.floor %[[VAL_12]] : f64
// CHECK: memref.store %[[VAL_13]], %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<32xf64>
// CHECK: }
// CHECK: %[[VAL_14:.*]] = memref.tensor_load %[[VAL_7]] : memref<32xf64>
// CHECK: %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
// CHECK: return %[[VAL_14]] : tensor<32xf64>
// CHECK: }
func @floor(%arga: tensor<32xf64, #SV>,
@@ -134,7 +134,7 @@ func @floor(%arga: tensor<32xf64, #SV>,
// CHECK: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_2]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_2]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
// CHECK: %[[VAL_7:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] {
Expand All @@ -143,7 +143,7 @@ func @floor(%arga: tensor<32xf64, #SV>,
// CHECK: %[[VAL_13:.*]] = arith.negf %[[VAL_12]] : f64
// CHECK: memref.store %[[VAL_13]], %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<32xf64>
// CHECK: }
// CHECK: %[[VAL_14:.*]] = memref.tensor_load %[[VAL_7]] : memref<32xf64>
// CHECK: %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
// CHECK: return %[[VAL_14]] : tensor<32xf64>
// CHECK: }
func @neg(%arga: tensor<32xf64, #SV>,
@@ -169,8 +169,8 @@ func @neg(%arga: tensor<32xf64, #SV>,
// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf64>
// CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK: %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) {
@@ -202,7 +202,7 @@ func @neg(%arga: tensor<32xf64, #SV>,
// CHECK: %[[VAL_32:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_30]]] : memref<32xf64>
// CHECK: memref.store %[[VAL_32]], %[[VAL_11]]{{\[}}%[[VAL_30]]] : memref<32xf64>
// CHECK: }
// CHECK: %[[VAL_33:.*]] = memref.tensor_load %[[VAL_11]] : memref<32xf64>
// CHECK: %[[VAL_33:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xf64>
// CHECK: return %[[VAL_33]] : tensor<32xf64>
// CHECK: }
func @add(%arga: tensor<32xf64, #SV>,
@@ -229,8 +229,8 @@ func @add(%arga: tensor<32xf64, #SV>,
// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
// CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf64>
// CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK: %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) {
@@ -264,7 +264,7 @@ func @add(%arga: tensor<32xf64, #SV>,
// CHECK: %[[VAL_34:.*]] = arith.negf %[[VAL_33]] : f64
// CHECK: memref.store %[[VAL_34]], %[[VAL_11]]{{\[}}%[[VAL_31]]] : memref<32xf64>
// CHECK: }
// CHECK: %[[VAL_35:.*]] = memref.tensor_load %[[VAL_11]] : memref<32xf64>
// CHECK: %[[VAL_35:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xf64>
// CHECK: return %[[VAL_35]] : tensor<32xf64>
// CHECK: }
func @sub(%arga: tensor<32xf64, #SV>,
@@ -289,8 +289,8 @@ func @sub(%arga: tensor<32xf64, #SV>,
// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_8:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf64>
// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_4]] {
@@ -300,7 +300,7 @@ func @sub(%arga: tensor<32xf64, #SV>,
// CHECK: %[[VAL_16:.*]] = arith.mulf %[[VAL_14]], %[[VAL_15]] : f64
// CHECK: memref.store %[[VAL_16]], %[[VAL_9]]{{\[}}%[[VAL_13]]] : memref<32xf64>
// CHECK: }
// CHECK: %[[VAL_17:.*]] = memref.tensor_load %[[VAL_9]] : memref<32xf64>
// CHECK: %[[VAL_17:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<32xf64>
// CHECK: return %[[VAL_17]] : tensor<32xf64>
// CHECK: }
func @mul(%arga: tensor<32xf64, #SV>,
@@ -325,7 +325,7 @@ func @mul(%arga: tensor<32xf64, #SV>,
// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_8:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] {
@@ -334,7 +334,7 @@ func @mul(%arga: tensor<32xf64, #SV>,
// CHECK: %[[VAL_14:.*]] = arith.divf %[[VAL_13]], %[[VAL_2]] : f64
// CHECK: memref.store %[[VAL_14]], %[[VAL_8]]{{\[}}%[[VAL_12]]] : memref<32xf64>
// CHECK: }
// CHECK: %[[VAL_15:.*]] = memref.tensor_load %[[VAL_8]] : memref<32xf64>
// CHECK: %[[VAL_15:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32xf64>
// CHECK: return %[[VAL_15]] : tensor<32xf64>
// CHECK: }
func @divbyc(%arga: tensor<32xf64, #SV>,
56 changes: 28 additions & 28 deletions mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir

Large diffs are not rendered by default.

18 changes: 9 additions & 9 deletions mlir/test/Dialect/SparseTensor/sparse_kernels.mlir
@@ -17,8 +17,8 @@
// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<10x20xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<10x20xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_1]] : memref<20x30xf32>
// CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_2]] : memref<10x30xf32>
// CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<20x30xf32>
// CHECK: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<10x30xf32>
// CHECK: %[[VAL_13:.*]] = memref.alloc() : memref<10x30xf32>
// CHECK: memref.copy %[[VAL_12]], %[[VAL_13]] : memref<10x30xf32> to memref<10x30xf32>
// CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]]] : memref<?xindex>
@@ -40,7 +40,7 @@
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_29:.*]] = memref.tensor_load %[[VAL_13]] : memref<10x30xf32>
// CHECK: %[[VAL_29:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<10x30xf32>
// CHECK: return %[[VAL_29]] : tensor<10x30xf32>
// CHECK: }
func @matmul(%a: tensor<10x20xf32, #DCSR>,
@@ -59,13 +59,13 @@ func @matmul(%a: tensor<10x20xf32, #DCSR>,
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 6 : index
// CHECK: %[[VAL_6:.*]] = memref.buffer_cast %[[VAL_0]] : memref<8x8xi32>
// CHECK: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<8x8xi32>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<3x3xi32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<3x3xi32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<3x3xi32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<3x3xi32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<3x3xi32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_2]] : memref<6x6xi32>
// CHECK: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<6x6xi32>
// CHECK: %[[VAL_13:.*]] = memref.alloc() : memref<6x6xi32>
// CHECK: memref.copy %[[VAL_12]], %[[VAL_13]] : memref<6x6xi32> to memref<6x6xi32>
// CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_3]]] : memref<?xindex>
@@ -91,7 +91,7 @@ func @matmul(%a: tensor<10x20xf32, #DCSR>,
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_32:.*]] = memref.tensor_load %[[VAL_13]] : memref<6x6xi32>
// CHECK: %[[VAL_32:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<6x6xi32>
// CHECK: return %[[VAL_32]] : tensor<6x6xi32>
// CHECK: }
func @conv2d(%input: tensor<8x8xi32>,
@@ -111,13 +111,13 @@ func @conv2d(%input: tensor<8x8xi32>,
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 5 : index
// CHECK: %[[VAL_7:.*]] = memref.buffer_cast %[[VAL_0]] : memref<5x3xi8>
// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : memref<5x3xi8>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<3x6xi8, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<3x6xi8, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_5]] : tensor<3x6xi8, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_5]] : tensor<3x6xi8, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<3x6xi8, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_2]] : memref<5x6xi64>
// CHECK: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<5x6xi64>
// CHECK: %[[VAL_14:.*]] = memref.alloc() : memref<5x6xi64>
// CHECK: memref.copy %[[VAL_13]], %[[VAL_14]] : memref<5x6xi64> to memref<5x6xi64>
// CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref<?xindex>
@@ -142,7 +142,7 @@ func @conv2d(%input: tensor<8x8xi32>,
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_33:.*]] = memref.tensor_load %[[VAL_14]] : memref<5x6xi64>
// CHECK: %[[VAL_33:.*]] = bufferization.to_tensor %[[VAL_14]] : memref<5x6xi64>
// CHECK: return %[[VAL_33]] : tensor<5x6xi64>
// CHECK: }
func @quantized_matmul(%input1: tensor<5x3xi8>,
12 changes: 6 additions & 6 deletions mlir/test/Dialect/SparseTensor/sparse_lower.mlir
@@ -30,8 +30,8 @@
// CHECK-HIR: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x64xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-HIR: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<32x64xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-HIR: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-HIR: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<64xf64>
// CHECK-HIR: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf64>
// CHECK-HIR: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64>
// CHECK-HIR: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK-HIR: %[[VAL_11:.*]] = memref.alloc() : memref<32xf64>
// CHECK-HIR: memref.copy %[[VAL_10]], %[[VAL_11]] : memref<32xf64> to memref<32xf64>
// CHECK-HIR: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
@@ -49,7 +49,7 @@
// CHECK-HIR: }
// CHECK-HIR: memref.store %[[VAL_17]], %[[VAL_11]]{{\[}}%[[VAL_12]]] : memref<32xf64>
// CHECK-HIR: }
// CHECK-HIR: %[[VAL_26:.*]] = memref.tensor_load %[[VAL_11]] : memref<32xf64>
// CHECK-HIR: %[[VAL_26:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xf64>
// CHECK-HIR: return %[[VAL_26]] : tensor<32xf64>
// CHECK-HIR: }

@@ -63,8 +63,8 @@
// CHECK-MIR: %[[VAL_6:.*]] = call @sparsePointers(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK-MIR: %[[VAL_7:.*]] = call @sparseIndices(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK-MIR: %[[VAL_8:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr<i8>) -> memref<?xf64>
// CHECK-MIR: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<64xf64>
// CHECK-MIR: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf64>
// CHECK-MIR: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64>
// CHECK-MIR: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK-MIR: %[[VAL_11:.*]] = memref.alloc() : memref<32xf64>
// CHECK-MIR: memref.copy %[[VAL_10]], %[[VAL_11]] : memref<32xf64> to memref<32xf64>
// CHECK-MIR: scf.for %[[VAL_14:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
@@ -82,7 +82,7 @@
// CHECK-MIR: }
// CHECK-MIR: memref.store %[[VAL_19]], %[[VAL_11]]{{\[}}%[[VAL_14]]] : memref<32xf64>
// CHECK-MIR: }
// CHECK-MIR: %[[VAL_28:.*]] = memref.tensor_load %[[VAL_11]] : memref<32xf64>
// CHECK-MIR: %[[VAL_28:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xf64>
// CHECK-MIR: return %[[VAL_28]] : tensor<32xf64>
// CHECK-MIR: }

12 changes: 6 additions & 6 deletions mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir
@@ -33,8 +33,8 @@
// CHECK-HIR: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, pointerBitWidth = 0, indexBitWidth = 0 }>> to memref<?xindex>
// CHECK-HIR: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, pointerBitWidth = 0, indexBitWidth = 0 }>> to memref<?xindex>
// CHECK-HIR: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, pointerBitWidth = 0, indexBitWidth = 0 }>> to memref<?xf64>
// CHECK-HIR: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<64xf64>
// CHECK-HIR: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf64>
// CHECK-HIR: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64>
// CHECK-HIR: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK-HIR: %[[VAL_11:.*]] = memref.alloc() : memref<32xf64>
// CHECK-HIR: memref.copy %[[VAL_10]], %[[VAL_11]] : memref<32xf64> to memref<32xf64>
// CHECK-HIR: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
@@ -51,7 +51,7 @@
// CHECK-HIR: memref.store %[[VAL_22]], %[[VAL_11]]{{\[}}%[[VAL_18]]] : memref<32xf64>
// CHECK-HIR: }
// CHECK-HIR: }
// CHECK-HIR: %[[VAL_23:.*]] = memref.tensor_load %[[VAL_11]] : memref<32xf64>
// CHECK-HIR: %[[VAL_23:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xf64>
// CHECK-HIR: return %[[VAL_23]] : tensor<32xf64>
// CHECK-HIR: }

@@ -65,8 +65,8 @@
// CHECK-MIR: %[[VAL_7:.*]] = call @sparsePointers(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK-MIR: %[[VAL_8:.*]] = call @sparseIndices(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK-MIR: %[[VAL_9:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr<i8>) -> memref<?xf64>
// CHECK-MIR: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref<64xf64>
// CHECK-MIR: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf64>
// CHECK-MIR: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64>
// CHECK-MIR: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK-MIR: %[[VAL_12:.*]] = memref.alloc() : memref<32xf64>
// CHECK-MIR: memref.copy %[[VAL_11]], %[[VAL_12]] : memref<32xf64> to memref<32xf64>
// CHECK-MIR: scf.for %[[VAL_15:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
@@ -83,7 +83,7 @@
// CHECK-MIR: memref.store %[[VAL_25]], %[[VAL_12]]{{\[}}%[[VAL_21]]] : memref<32xf64>
// CHECK-MIR: }
// CHECK-MIR: }
// CHECK-MIR: %[[VAL_26:.*]] = memref.tensor_load %[[VAL_12]] : memref<32xf64>
// CHECK-MIR: %[[VAL_26:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<32xf64>
// CHECK-MIR: return %[[VAL_26]] : tensor<32xf64>
// CHECK-MIR: }

12 changes: 6 additions & 6 deletions mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir
@@ -30,8 +30,8 @@
// CHECK-HIR: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x64xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-HIR: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<32x64xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-HIR: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-HIR: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<64xf64>
// CHECK-HIR: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf64>
// CHECK-HIR: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64>
// CHECK-HIR: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK-HIR: scf.for %[[VAL_11:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
// CHECK-HIR-DAG: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref<?xindex>
// CHECK-HIR-DAG: %[[VAL_13:.*]] = arith.addi %[[VAL_11]], %[[VAL_5]] : index
@@ -47,7 +47,7 @@
// CHECK-HIR: }
// CHECK-HIR: memref.store %[[VAL_16]], %[[VAL_10]]{{\[}}%[[VAL_11]]] : memref<32xf64>
// CHECK-HIR: }
// CHECK-HIR: %[[VAL_25:.*]] = memref.tensor_load %[[VAL_10]] : memref<32xf64>
// CHECK-HIR: %[[VAL_25:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<32xf64>
// CHECK-HIR: return %[[VAL_25]] : tensor<32xf64>
// CHECK-HIR: }

@@ -61,8 +61,8 @@
// CHECK-MIR: %[[VAL_6:.*]] = call @sparsePointers(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK-MIR: %[[VAL_7:.*]] = call @sparseIndices(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK-MIR: %[[VAL_8:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr<i8>) -> memref<?xf64>
// CHECK-MIR: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<64xf64>
// CHECK-MIR: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf64>
// CHECK-MIR: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64>
// CHECK-MIR: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK-MIR: scf.for %[[VAL_11:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
// CHECK-MIR-DAG: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref<?xindex>
// CHECK-MIR-DAG: %[[VAL_13:.*]] = arith.addi %[[VAL_11]], %[[VAL_5]] : index
@@ -78,7 +78,7 @@
// CHECK-MIR: }
// CHECK-MIR: memref.store %[[VAL_16]], %[[VAL_10]]{{\[}}%[[VAL_11]]] : memref<32xf64>
// CHECK-MIR: }
// CHECK-MIR: %[[VAL_25:.*]] = memref.tensor_load %[[VAL_10]] : memref<32xf64>
// CHECK-MIR: %[[VAL_25:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<32xf64>
// CHECK-MIR: return %[[VAL_25]] : tensor<32xf64>
// CHECK-MIR: }

6 changes: 3 additions & 3 deletions mlir/test/Dialect/SparseTensor/sparse_nd.mlir
@@ -34,13 +34,13 @@
// CHECK: %[[VAL_10:.*]] = arith.constant 80 : index
// CHECK: %[[VAL_11:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_12:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_0]] : memref<10x20x30x40x50x60x70x80xf32>
// CHECK: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_0]] : memref<10x20x30x40x50x60x70x80xf32>
// CHECK: %[[VAL_14:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ], pointerBitWidth = 0, indexBitWidth = 0 }>> to memref<?xindex>
// CHECK: %[[VAL_15:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ], pointerBitWidth = 0, indexBitWidth = 0 }>> to memref<?xindex>
// CHECK: %[[VAL_16:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ], pointerBitWidth = 0, indexBitWidth = 0 }>> to memref<?xindex>
// CHECK: %[[VAL_17:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ], pointerBitWidth = 0, indexBitWidth = 0 }>> to memref<?xindex>
// CHECK: %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ], pointerBitWidth = 0, indexBitWidth = 0 }>> to memref<?xf32>
// CHECK: %[[VAL_19:.*]] = memref.buffer_cast %[[VAL_2]] : memref<10x20x30x40x50x60x70x80xf32>
// CHECK: %[[VAL_19:.*]] = bufferization.to_memref %[[VAL_2]] : memref<10x20x30x40x50x60x70x80xf32>
// CHECK: %[[VAL_20:.*]] = memref.alloc() : memref<10x20x30x40x50x60x70x80xf32>
// CHECK: memref.copy %[[VAL_19]], %[[VAL_20]] : memref<10x20x30x40x50x60x70x80xf32> to memref<10x20x30x40x50x60x70x80xf32>
// CHECK: scf.for %[[VAL_21:.*]] = %[[VAL_11]] to %[[VAL_10]] step %[[VAL_12]] {
@@ -81,7 +81,7 @@
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_50:.*]] = memref.tensor_load %[[VAL_20]] : memref<10x20x30x40x50x60x70x80xf32>
// CHECK: %[[VAL_50:.*]] = bufferization.to_tensor %[[VAL_20]] : memref<10x20x30x40x50x60x70x80xf32>
// CHECK: return %[[VAL_50]] : tensor<10x20x30x40x50x60x70x80xf32>
// CHECK: }
func @mul(%arga: tensor<10x20x30x40x50x60x70x80xf32>,
8 changes: 4 additions & 4 deletions mlir/test/Dialect/SparseTensor/sparse_perm.mlir
@@ -23,7 +23,7 @@
// CHECK: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20x30xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_8:.*]] = memref.buffer_cast %[[VAL_1]] : memref<20x30x10xf32>
// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<20x30x10xf32>
// CHECK: %[[VAL_9:.*]] = memref.alloc() : memref<20x30x10xf32>
// CHECK: memref.copy %[[VAL_8]], %[[VAL_9]] : memref<20x30x10xf32> to memref<20x30x10xf32>
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
@@ -38,7 +38,7 @@
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_18:.*]] = memref.tensor_load %[[VAL_9]] : memref<20x30x10xf32>
// CHECK: %[[VAL_18:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<20x30x10xf32>
// CHECK: return %[[VAL_18]] : tensor<20x30x10xf32>
// CHECK: }
func @sparse_static_dims(%arga: tensor<10x20x30xf32, #X>,
@@ -62,7 +62,7 @@ func @sparse_static_dims(%arga: tensor<10x20x30xf32, #X>,
// CHECK: %[[VAL_6:.*]] = tensor.dim %[[VAL_1]], %[[VAL_3]] : tensor<?x?x?xf32>
// CHECK: %[[VAL_7:.*]] = tensor.dim %[[VAL_1]], %[[VAL_4]] : tensor<?x?x?xf32>
// CHECK: %[[VAL_8:.*]] = tensor.dim %[[VAL_1]], %[[VAL_2]] : tensor<?x?x?xf32>
// CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<?x?x?xf32>
// CHECK: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<?x?x?xf32>
// CHECK: %[[VAL_10:.*]] = memref.alloc(%[[VAL_6]], %[[VAL_7]], %[[VAL_8]]) : memref<?x?x?xf32>
// CHECK: memref.copy %[[VAL_9]], %[[VAL_10]] : memref<?x?x?xf32> to memref<?x?x?xf32>
// CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_3]] to %[[VAL_7]] step %[[VAL_4]] {
@@ -77,7 +77,7 @@
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_19:.*]] = memref.tensor_load %[[VAL_10]] : memref<?x?x?xf32>
// CHECK: %[[VAL_19:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<?x?x?xf32>
// CHECK: return %[[VAL_19]] : tensor<?x?x?xf32>
// CHECK: }
func @sparse_dynamic_dims(%arga: tensor<?x?x?xf32, #X>,
8 changes: 4 additions & 4 deletions mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir
@@ -26,7 +26,7 @@
// CHECK-HIR: %[[VAL_6:.*]] = tensor.dim %[[VAL_0]], %[[VAL_3]] : tensor<?x?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-HIR: %[[VAL_7:.*]] = tensor.dim %[[VAL_0]], %[[VAL_2]] : tensor<?x?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-HIR: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-HIR: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<f32>
// CHECK-HIR: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<f32>
// CHECK-HIR: %[[VAL_10:.*]] = memref.alloc() : memref<f32>
// CHECK-HIR: memref.copy %[[VAL_9]], %[[VAL_10]] : memref<f32> to memref<f32>
// CHECK-HIR: %[[VAL_11:.*]] = memref.load %[[VAL_10]][] : memref<f32>
@@ -46,7 +46,7 @@
// CHECK-HIR: scf.yield %[[VAL_15]] : f32
// CHECK-HIR: }
// CHECK-HIR: memref.store %[[VAL_12]], %[[VAL_10]][] : memref<f32>
// CHECK-HIR: %[[VAL_30:.*]] = memref.tensor_load %[[VAL_10]] : memref<f32>
// CHECK-HIR: %[[VAL_30:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<f32>
// CHECK-HIR: return %[[VAL_30]] : tensor<f32>
// CHECK-HIR: }
//
@@ -60,7 +60,7 @@
// CHECK-MIR: %[[VAL_6:.*]] = call @sparseDimSize(%[[VAL_0]], %[[VAL_3]]) : (!llvm.ptr<i8>, index) -> index
// CHECK-MIR: %[[VAL_7:.*]] = call @sparseDimSize(%[[VAL_0]], %[[VAL_2]]) : (!llvm.ptr<i8>, index) -> index
// CHECK-MIR: %[[VAL_8:.*]] = call @sparseValuesF32(%[[VAL_0]]) : (!llvm.ptr<i8>) -> memref<?xf32>
// CHECK-MIR: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<f32>
// CHECK-MIR: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<f32>
// CHECK-MIR: %[[VAL_10:.*]] = memref.alloc() : memref<f32>
// CHECK-MIR: memref.copy %[[VAL_9]], %[[VAL_10]] : memref<f32> to memref<f32>
// CHECK-MIR: %[[VAL_11:.*]] = memref.load %[[VAL_10]][] : memref<f32>
@@ -80,7 +80,7 @@
// CHECK-MIR: scf.yield %[[VAL_15]] : f32
// CHECK-MIR: }
// CHECK-MIR: memref.store %[[VAL_12]], %[[VAL_10]][] : memref<f32>
// CHECK-MIR: %[[VAL_30:.*]] = memref.tensor_load %[[VAL_10]] : memref<f32>
// CHECK-MIR: %[[VAL_30:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<f32>
// CHECK-MIR: return %[[VAL_30]] : tensor<f32>
// CHECK-MIR: }
func @sparse_dynamic_dims(%arga: tensor<?x?x?xf32, #X>,
6 changes: 3 additions & 3 deletions mlir/test/Dialect/SparseTensor/sparse_scalars.mlir
@@ -33,8 +33,8 @@
// CHECK: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> to memref<?xindex>
// CHECK: %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> to memref<?xindex>
// CHECK: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> to memref<?xf32>
// CHECK: %[[VAL_14:.*]] = memref.buffer_cast %[[VAL_1]] : memref<f32>
// CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_4]] : memref<32x16xf32>
// CHECK: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_1]] : memref<f32>
// CHECK: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_4]] : memref<32x16xf32>
// CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_14]][] : memref<f32>
// CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref<?xindex>
@@ -56,7 +56,7 @@
// CHECK: memref.store %[[VAL_33]], %[[VAL_15]]{{\[}}%[[VAL_20]], %[[VAL_25]]] : memref<32x16xf32>
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_34:.*]] = memref.tensor_load %[[VAL_15]] : memref<32x16xf32>
// CHECK: %[[VAL_34:.*]] = bufferization.to_tensor %[[VAL_15]] : memref<32x16xf32>
// CHECK: return %[[VAL_34]] : tensor<32x16xf32>
// CHECK: }
func @mul(%arga: tensor<32x16xf32, #SparseMatrix>,
4 changes: 2 additions & 2 deletions mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir
@@ -32,7 +32,7 @@
// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_2]], %[[VAL_8]] : tensor<64x32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_2]], %[[VAL_8]] : tensor<64x32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<64x32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_0]] : memref<f64>
// CHECK: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_0]] : memref<f64>
// CHECK: %[[VAL_16:.*]] = tensor.extract %[[VAL_0]][] : tensor<f64>
// CHECK: %[[VAL_17:.*]] = scf.for %[[VAL_18:.*]] = %[[VAL_6]] to %[[VAL_7]] step %[[VAL_8]] iter_args(%[[VAL_19:.*]] = %[[VAL_16]]) -> (f64) {
// CHECK: %[[VAL_20:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_18]]] : memref<?xindex>
@@ -109,7 +109,7 @@
// CHECK: scf.yield %[[VAL_84]] : f64
// CHECK: }
// CHECK: memref.store %[[VAL_86:.*]], %[[VAL_15]][] : memref<f64>
// CHECK: %[[VAL_87:.*]] = memref.tensor_load %[[VAL_15]] : memref<f64>
// CHECK: %[[VAL_87:.*]] = bufferization.to_tensor %[[VAL_15]] : memref<f64>
// CHECK: return %[[VAL_87]] : tensor<f64>
// CHECK: }
func @sparse_matrix_sum(%argx: tensor<f64> {linalg.inplaceable = true},
6 changes: 3 additions & 3 deletions mlir/test/Dialect/Standard/bufferize.mlir
@@ -4,10 +4,10 @@
// CHECK-SAME: %[[PRED:.*]]: i1,
// CHECK-SAME: %[[TRUE_VAL:.*]]: tensor<f32>,
// CHECK-SAME: %[[FALSE_VAL:.*]]: tensor<f32>) -> tensor<f32> {
// CHECK-DAG: %[[TRUE_VAL_MEMREF:.*]] = memref.buffer_cast %[[TRUE_VAL]] : memref<f32>
// CHECK-DAG: %[[FALSE_VAL_MEMREF:.*]] = memref.buffer_cast %[[FALSE_VAL]] : memref<f32>
// CHECK-DAG: %[[TRUE_VAL_MEMREF:.*]] = bufferization.to_memref %[[TRUE_VAL]] : memref<f32>
// CHECK-DAG: %[[FALSE_VAL_MEMREF:.*]] = bufferization.to_memref %[[FALSE_VAL]] : memref<f32>
// CHECK: %[[RET_MEMREF:.*]] = select %[[PRED]], %[[TRUE_VAL_MEMREF]], %[[FALSE_VAL_MEMREF]] : memref<f32>
// CHECK: %[[RET:.*]] = memref.tensor_load %[[RET_MEMREF]] : memref<f32>
// CHECK: %[[RET:.*]] = bufferization.to_tensor %[[RET_MEMREF]] : memref<f32>
// CHECK: return %[[RET]] : tensor<f32>
func @select(%arg0: i1, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32> {
%0 = select %arg0, %arg1, %arg2 : tensor<f32>
2 changes: 1 addition & 1 deletion mlir/test/Dialect/Standard/func-bufferize.mlir
@@ -39,7 +39,7 @@ func @call_sink(%arg0: tensor<f32>) {

// CHECK-LABEL: func @unconverted_op_in_body() -> memref<f32> {
// CHECK: %[[TENSOR:.*]] = "test.source"() : () -> tensor<f32>
// CHECK: %[[MEMREF:.*]] = memref.buffer_cast %[[TENSOR]] : memref<f32>
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<f32>
// CHECK: return %[[MEMREF]] : memref<f32>
func @unconverted_op_in_body() -> tensor<f32> {
%0 = "test.source"() : () -> tensor<f32>
2 changes: 1 addition & 1 deletion mlir/test/Dialect/Standard/tensor-constant-bufferize.mlir
@@ -15,7 +15,7 @@
// CHECK: @basic
func @basic() -> tensor<3x4xf32> {
// CHECK: %[[MEMREF:.*]] = memref.get_global @__constant_3x4xf32 : memref<3x4xf32>
// CHECK: %[[TENSOR:.*]] = memref.tensor_load %[[MEMREF]]
// CHECK: %[[TENSOR:.*]] = bufferization.to_tensor %[[MEMREF]]
%0 = arith.constant dense<7.0> : tensor<3x4xf32>
// CHECK: return %[[TENSOR]]
return %0 : tensor<3x4xf32>
24 changes: 12 additions & 12 deletions mlir/test/Dialect/Tensor/bufferize.mlir
@@ -3,7 +3,7 @@
// CHECK-LABEL: func @dim(
// CHECK-SAME: %[[TENSOR:.*]]: tensor<f32>,
// CHECK-SAME: %[[INDEX:.*]]: index) -> index {
// CHECK: %[[MEMREF:.*]] = memref.buffer_cast %[[TENSOR]] : memref<f32>
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<f32>
// CHECK: %[[EXTENT:.*]] = memref.dim %[[MEMREF]], %[[INDEX]] : memref<f32>
// CHECK: return %[[EXTENT]] : index
func @dim(%arg0: tensor<f32>, %arg1: index) -> index {
@@ -13,9 +13,9 @@ func @dim(%arg0: tensor<f32>, %arg1: index) -> index {

// CHECK-LABEL: func @tensor.cast(
// CHECK-SAME: %[[TENSOR:.*]]: tensor<?xindex>) -> tensor<2xindex> {
// CHECK: %[[MEMREF:.*]] = memref.buffer_cast %[[TENSOR]]
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]]
// CHECK: %[[CASTED:.*]] = memref.cast %[[MEMREF]] : memref<?xindex> to memref<2xindex>
// CHECK: %[[RET:.*]] = memref.tensor_load %[[CASTED]]
// CHECK: %[[RET:.*]] = bufferization.to_tensor %[[CASTED]]
// CHECK: return %[[RET]] : tensor<2xindex>
func @tensor.cast(%arg0: tensor<?xindex>) -> tensor<2xindex> {
%0 = tensor.cast %arg0 : tensor<?xindex> to tensor<2xindex>
@@ -24,9 +24,9 @@ func @tensor.cast(%arg0: tensor<?xindex>) -> tensor<2xindex> {

// CHECK-LABEL: func @tensor.cast_from_unranked(
// CHECK-SAME: %[[TENSOR:.*]]: tensor<*xf32>) -> tensor<2xf32> {
// CHECK: %[[MEMREF:.*]] = memref.buffer_cast %[[TENSOR]] : memref<*xf32>
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<*xf32>
// CHECK: %[[CASTED_MEMREF:.*]] = memref.cast %[[MEMREF]] : memref<*xf32> to memref<2xf32>
// CHECK: %[[RET:.*]] = memref.tensor_load %[[CASTED_MEMREF]] : memref<2xf32>
// CHECK: %[[RET:.*]] = bufferization.to_tensor %[[CASTED_MEMREF]] : memref<2xf32>
// CHECK: return %[[RET]] : tensor<2xf32>
func @tensor.cast_from_unranked(%arg0: tensor<*xf32>) -> tensor<2xf32> {
%0 = tensor.cast %arg0 : tensor<*xf32> to tensor<2xf32>
@@ -35,9 +35,9 @@ func @tensor.cast_from_unranked(%arg0: tensor<*xf32>) -> tensor<2xf32> {

// CHECK-LABEL: func @tensor.cast_to_unranked(
// CHECK-SAME: %[[TENSOR:.*]]: tensor<2xf32>) -> tensor<*xf32> {
// CHECK: %[[MEMREF:.*]] = memref.buffer_cast %[[TENSOR]] : memref<2xf32>
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<2xf32>
// CHECK: %[[CASTED_MEMREF:.*]] = memref.cast %[[MEMREF]] : memref<2xf32> to memref<*xf32>
// CHECK: %[[RET:.*]] = memref.tensor_load %[[CASTED_MEMREF]] : memref<*xf32>
// CHECK: %[[RET:.*]] = bufferization.to_tensor %[[CASTED_MEMREF]] : memref<*xf32>
// CHECK: return %[[RET]] : tensor<*xf32>
func @tensor.cast_to_unranked(%arg0: tensor<2xf32>) -> tensor<*xf32> {
%0 = tensor.cast %arg0 : tensor<2xf32> to tensor<*xf32>
@@ -47,7 +47,7 @@ func @tensor.cast_to_unranked(%arg0: tensor<2xf32>) -> tensor<*xf32> {
// CHECK-LABEL: func @tensor.extract(
// CHECK-SAME: %[[TENSOR:.*]]: tensor<?xf32>,
// CHECK-SAME: %[[IDX:.*]]: index) -> f32 {
// CHECK: %[[MEMREF:.*]] = memref.buffer_cast %[[TENSOR]] : memref<?xf32>
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<?xf32>
// CHECK: %[[RET:.*]] = memref.load %[[MEMREF]][%[[IDX]]] : memref<?xf32>
// CHECK: return %[[RET]] : f32
// CHECK: }
@@ -64,7 +64,7 @@ func @tensor.extract(%arg0: tensor<?xf32>, %arg1: index) -> f32 {
// CHECK: store %[[ELEM0]], %[[MEMREF]][%[[C0]]]
// CHECK: %[[C1:.*]] = arith.constant 1 : index
// CHECK: store %[[ELEM1]], %[[MEMREF]][%[[C1]]]
// CHECK: %[[RET:.*]] = memref.tensor_load %[[MEMREF]]
// CHECK: %[[RET:.*]] = bufferization.to_tensor %[[MEMREF]]
// CHECK: return %[[RET]] : tensor<2xindex>
func @tensor.from_elements(%arg0: index, %arg1: index) -> tensor<2xindex> {
%0 = tensor.from_elements %arg0, %arg1 : tensor<2xindex>
@@ -74,7 +74,7 @@ func @tensor.from_elements(%arg0: index, %arg1: index) -> tensor<2xindex> {
// CHECK-LABEL: func @tensor.generate(
// CHECK-SAME: %[[ARG:.*]]: tensor<*xf32>,
// CHECK-SAME: %[[DYNAMIC_EXTENT:.*]]: index) -> tensor<?xindex> {
// CHECK: %[[CASTED:.*]] = memref.buffer_cast %[[ARG]] : memref<*xf32>
// CHECK: %[[CASTED:.*]] = bufferization.to_memref %[[ARG]] : memref<*xf32>
// CHECK: %[[MEMREF:.*]] = memref.alloc(%[[DYNAMIC_EXTENT]]) : memref<?xindex>
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[C1:.*]] = arith.constant 1 : index
@@ -83,7 +83,7 @@ func @tensor.from_elements(%arg0: index, %arg1: index) -> tensor<2xindex> {
// CHECK: store %[[ELEM]], %[[MEMREF]][%[[I]]] : memref<?xindex>
// CHECK: scf.yield
// CHECK: }
// CHECK: %[[RET:.*]] = memref.tensor_load %[[MEMREF]] : memref<?xindex>
// CHECK: %[[RET:.*]] = bufferization.to_tensor %[[MEMREF]] : memref<?xindex>
// CHECK: return %[[RET]] : tensor<?xindex>
// CHECK: }
func @tensor.generate(%arg: tensor<*xf32>, %dynamic_extent: index) -> tensor<?xindex> {
@@ -109,7 +109,7 @@ func @tensor.generate(%arg: tensor<*xf32>, %dynamic_extent: index) -> tensor<?xi
// CHECK: store %[[VAL_7]], %[[MEMREF]][%[[I]], %[[J]]] : memref<16x?xindex>
// CHECK: scf.yield
// CHECK: }
// CHECK: %[[RET:.*]] = memref.tensor_load %[[MEMREF]] : memref<16x?xindex>
// CHECK: %[[RET:.*]] = bufferization.to_tensor %[[MEMREF]] : memref<16x?xindex>
// CHECK: return %[[RET]] : tensor<16x?xindex>
// CHECK: }
func @tensor.generate_static_and_dynamic(%arg0: index) -> tensor<16x?xindex> {
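// The bufferize tests above all share one bridging pattern: each tensor
// operand enters the memref world through bufferization.to_memref, the
// memref-level op does the actual work, and bufferization.to_tensor hands the
// result back. A minimal sketch of that pattern as a hypothetical function
// (not one of the tests above), mirroring the tensor.cast case:
//
//   func @cast_roundtrip(%t: tensor<?xindex>) -> tensor<2xindex> {
//     %m = bufferization.to_memref %t : memref<?xindex>
//     %c = memref.cast %m : memref<?xindex> to memref<2xindex>
//     %r = bufferization.to_tensor %c : memref<2xindex>
//     return %r : tensor<2xindex>
//   }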
12 changes: 6 additions & 6 deletions mlir/test/IR/core-ops.mlir
@@ -440,18 +440,18 @@ func @test_splat_op(%s : f32) {
}

// CHECK-LABEL: func @tensor_load_store
func @tensor_load_store(%0 : memref<4x4xi32>) {
// CHECK: %[[TENSOR:.*]] = memref.tensor_load %[[MEMREF:.*]] : memref<4x4xi32>
%1 = memref.tensor_load %0 : memref<4x4xi32>
func @tensor_load_store(%0 : memref<4x4xi32>, %1 : tensor<4x4xi32>) {
// CHECK-SAME: (%[[MEMREF:.*]]: memref<4x4xi32>,
// CHECK-SAME: %[[TENSOR:.*]]: tensor<4x4xi32>)
// CHECK: memref.tensor_store %[[TENSOR]], %[[MEMREF]] : memref<4x4xi32>
memref.tensor_store %1, %0 : memref<4x4xi32>
return
}

// CHECK-LABEL: func @unranked_tensor_load_store
func @unranked_tensor_load_store(%0 : memref<*xi32>) {
// CHECK: %[[TENSOR:.*]] = memref.tensor_load %[[MEMREF:.*]] : memref<*xi32>
%1 = memref.tensor_load %0 : memref<*xi32>
func @unranked_tensor_load_store(%0 : memref<*xi32>, %1 : tensor<*xi32>) {
// CHECK-SAME: (%[[MEMREF:.*]]: memref<*xi32>,
// CHECK-SAME: %[[TENSOR:.*]]: tensor<*xi32>)
// CHECK: memref.tensor_store %[[TENSOR]], %[[MEMREF]] : memref<*xi32>
memref.tensor_store %1, %0 : memref<*xi32>
return
20 changes: 10 additions & 10 deletions mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir
@@ -195,31 +195,31 @@ module {
// CHECK: ( -4, -3, -2, -1, 0, 1, 2, 3, 4, 305 )
//
%c0 = call @sparse_cast_s32_to_f32(%1) : (tensor<10xi32, #SV>) -> tensor<10xf32>
%m0 = memref.buffer_cast %c0 : memref<10xf32>
%m0 = bufferization.to_memref %c0 : memref<10xf32>
%v0 = vector.transfer_read %m0[%z], %f: memref<10xf32>, vector<10xf32>
vector.print %v0 : vector<10xf32>

//
// CHECK: ( 4.29497e+09, 4.29497e+09, 4.29497e+09, 4.29497e+09, 0, 1, 2, 3, 4, 305 )
//
%c1 = call @sparse_cast_u32_to_f32(%1) : (tensor<10xi32, #SV>) -> tensor<10xf32>
%m1 = memref.buffer_cast %c1 : memref<10xf32>
%m1 = bufferization.to_memref %c1 : memref<10xf32>
%v1 = vector.transfer_read %m1[%z], %f: memref<10xf32>, vector<10xf32>
vector.print %v1 : vector<10xf32>

//
// CHECK: ( -4, -3, -2, -1, 0, 1, 2, 3, 4, 305 )
//
%c2 = call @sparse_cast_f32_to_s32(%3) : (tensor<10xf32, #SV>) -> tensor<10xi32>
%m2 = memref.buffer_cast %c2 : memref<10xi32>
%m2 = bufferization.to_memref %c2 : memref<10xi32>
%v2 = vector.transfer_read %m2[%z], %i: memref<10xi32>, vector<10xi32>
vector.print %v2 : vector<10xi32>

//
// CHECK: ( 4294967295, 4294967294, 4294967293, 4294967292, 0, 1, 2, 3, 4, 305 )
//
%c3 = call @sparse_cast_f64_to_u32(%7) : (tensor<10xf64, #SV>) -> tensor<10xi32>
%m3 = memref.buffer_cast %c3 : memref<10xi32>
%m3 = bufferization.to_memref %c3 : memref<10xi32>
%v3 = vector.transfer_read %m3[%z], %i: memref<10xi32>, vector<10xi32>
%vu = vector.bitcast %v3 : vector<10xi32> to vector<10xui32>
vector.print %vu : vector<10xui32>
@@ -228,47 +228,47 @@ module {
// CHECK: ( -4.4, -3.3, -2.2, -1.1, 0, 1.1, 2.2, 3.3, 4.4, 305.5 )
//
%c4 = call @sparse_cast_f32_to_f64(%3) : (tensor<10xf32, #SV>) -> tensor<10xf64>
%m4 = memref.buffer_cast %c4 : memref<10xf64>
%m4 = bufferization.to_memref %c4 : memref<10xf64>
%v4 = vector.transfer_read %m4[%z], %d: memref<10xf64>, vector<10xf64>
vector.print %v4 : vector<10xf64>

//
// CHECK: ( -4.4, -3.3, -2.2, -1.1, 0, 1.1, 2.2, 3.3, 4.4, 305.5 )
//
%c5 = call @sparse_cast_f64_to_f32(%5) : (tensor<10xf64, #SV>) -> tensor<10xf32>
%m5 = memref.buffer_cast %c5 : memref<10xf32>
%m5 = bufferization.to_memref %c5 : memref<10xf32>
%v5 = vector.transfer_read %m5[%z], %f: memref<10xf32>, vector<10xf32>
vector.print %v5 : vector<10xf32>

//
// CHECK: ( -4, -3, -2, -1, 0, 1, 2, 3, 4, 305 )
//
%c6 = call @sparse_cast_s32_to_u64(%1) : (tensor<10xi32, #SV>) -> tensor<10xi64>
%m6 = memref.buffer_cast %c6 : memref<10xi64>
%m6 = bufferization.to_memref %c6 : memref<10xi64>
%v6 = vector.transfer_read %m6[%z], %l: memref<10xi64>, vector<10xi64>
vector.print %v6 : vector<10xi64>

//
// CHECK: ( 4294967292, 4294967293, 4294967294, 4294967295, 0, 1, 2, 3, 4, 305 )
//
%c7 = call @sparse_cast_u32_to_s64(%1) : (tensor<10xi32, #SV>) -> tensor<10xi64>
%m7 = memref.buffer_cast %c7 : memref<10xi64>
%m7 = bufferization.to_memref %c7 : memref<10xi64>
%v7 = vector.transfer_read %m7[%z], %l: memref<10xi64>, vector<10xi64>
vector.print %v7 : vector<10xi64>

//
// CHECK: ( -4, -3, -2, -1, 0, 1, 2, 3, 4, 49 )
//
%c8 = call @sparse_cast_i32_to_i8(%1) : (tensor<10xi32, #SV>) -> tensor<10xi8>
%m8 = memref.buffer_cast %c8 : memref<10xi8>
%m8 = bufferization.to_memref %c8 : memref<10xi8>
%v8 = vector.transfer_read %m8[%z], %b: memref<10xi8>, vector<10xi8>
vector.print %v8 : vector<10xi8>

//
// CHECK: ( -1064514355, -1068289229, -1072902963, -1081291571, 0, 1066192077, 1074580685, 1079194419, 1082969293, 1134084096 )
//
%c9 = call @sparse_cast_f32_as_s32(%3) : (tensor<10xf32, #SV>) -> tensor<10xi32>
%m9 = memref.buffer_cast %c9 : memref<10xi32>
%m9 = bufferization.to_memref %c9 : memref<10xi32>
%v9 = vector.transfer_read %m9[%z], %i: memref<10xi32>, vector<10xi32>
vector.print %v9 : vector<10xi32>

@@ -59,56 +59,56 @@ module {
}
func @dumpAndRelease_234(%arg0: tensor<2x3x4xf64>) {
call @dump(%arg0) : (tensor<2x3x4xf64>) -> ()
%1 = memref.buffer_cast %arg0 : memref<2x3x4xf64>
%1 = bufferization.to_memref %arg0 : memref<2x3x4xf64>
memref.dealloc %1 : memref<2x3x4xf64>
return
}
func @dumpAndRelease_p34(%arg0: tensor<?x3x4xf64>) {
%0 = tensor.cast %arg0 : tensor<?x3x4xf64> to tensor<2x3x4xf64>
call @dump(%0) : (tensor<2x3x4xf64>) -> ()
%1 = memref.buffer_cast %arg0 : memref<?x3x4xf64>
%1 = bufferization.to_memref %arg0 : memref<?x3x4xf64>
memref.dealloc %1 : memref<?x3x4xf64>
return
}
func @dumpAndRelease_2p4(%arg0: tensor<2x?x4xf64>) {
%0 = tensor.cast %arg0 : tensor<2x?x4xf64> to tensor<2x3x4xf64>
call @dump(%0) : (tensor<2x3x4xf64>) -> ()
%1 = memref.buffer_cast %arg0 : memref<2x?x4xf64>
%1 = bufferization.to_memref %arg0 : memref<2x?x4xf64>
memref.dealloc %1 : memref<2x?x4xf64>
return
}
func @dumpAndRelease_23p(%arg0: tensor<2x3x?xf64>) {
%0 = tensor.cast %arg0 : tensor<2x3x?xf64> to tensor<2x3x4xf64>
call @dump(%0) : (tensor<2x3x4xf64>) -> ()
%1 = memref.buffer_cast %arg0 : memref<2x3x?xf64>
%1 = bufferization.to_memref %arg0 : memref<2x3x?xf64>
memref.dealloc %1 : memref<2x3x?xf64>
return
}
func @dumpAndRelease_2pp(%arg0: tensor<2x?x?xf64>) {
%0 = tensor.cast %arg0 : tensor<2x?x?xf64> to tensor<2x3x4xf64>
call @dump(%0) : (tensor<2x3x4xf64>) -> ()
%1 = memref.buffer_cast %arg0 : memref<2x?x?xf64>
%1 = bufferization.to_memref %arg0 : memref<2x?x?xf64>
memref.dealloc %1 : memref<2x?x?xf64>
return
}
func @dumpAndRelease_p3p(%arg0: tensor<?x3x?xf64>) {
%0 = tensor.cast %arg0 : tensor<?x3x?xf64> to tensor<2x3x4xf64>
call @dump(%0) : (tensor<2x3x4xf64>) -> ()
%1 = memref.buffer_cast %arg0 : memref<?x3x?xf64>
%1 = bufferization.to_memref %arg0 : memref<?x3x?xf64>
memref.dealloc %1 : memref<?x3x?xf64>
return
}
func @dumpAndRelease_pp4(%arg0: tensor<?x?x4xf64>) {
%0 = tensor.cast %arg0 : tensor<?x?x4xf64> to tensor<2x3x4xf64>
call @dump(%0) : (tensor<2x3x4xf64>) -> ()
%1 = memref.buffer_cast %arg0 : memref<?x?x4xf64>
%1 = bufferization.to_memref %arg0 : memref<?x?x4xf64>
memref.dealloc %1 : memref<?x?x4xf64>
return
}
func @dumpAndRelease_ppp(%arg0: tensor<?x?x?xf64>) {
%0 = tensor.cast %arg0 : tensor<?x?x?xf64> to tensor<2x3x4xf64>
call @dump(%0) : (tensor<2x3x4xf64>) -> ()
%1 = memref.buffer_cast %arg0 : memref<?x?x?xf64>
%1 = bufferization.to_memref %arg0 : memref<?x?x?xf64>
memref.dealloc %1 : memref<?x?x?xf64>
return
}
@@ -79,7 +79,7 @@ module {
// CHECK-SAME: ( 0, 0, 3, 6, -3, -6 ),
// CHECK-SAME: ( 2, -1, 3, 0, -3, 0 ) )
//
%m = memref.buffer_cast %0 : memref<6x6xi32>
%m = bufferization.to_memref %0 : memref<6x6xi32>
%v = vector.transfer_read %m[%c0, %c0], %i0
: memref<6x6xi32>, vector<6x6xi32>
vector.print %v : vector<6x6xi32>
@@ -86,7 +86,7 @@ module {
memref.store %d0, %xdata[%i, %j] : memref<7x3xf64>
}
}
%x = memref.tensor_load %xdata : memref<7x3xf64>
%x = bufferization.to_tensor %xdata : memref<7x3xf64>

// Read the sparse tensor from file, construct sparse storage.
%fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
@@ -106,7 +106,7 @@ module {
// CHECK: ( 0, 0, 0 )
// CHECK: ( 7, 0, 0 )
//
%r = memref.buffer_cast %0 : memref<7x3xf64>
%r = bufferization.to_memref %0 : memref<7x3xf64>
scf.for %i = %c0 to %c7 step %c1 {
%v = vector.transfer_read %r[%i, %c0], %d0: memref<7x3xf64>, vector<3xf64>
vector.print %v : vector<3xf64>
@@ -114,7 +114,7 @@ module {
%d0 = arith.constant 0.0 : f64
%c0 = arith.constant 0 : index
%dm = sparse_tensor.convert %arg0 : tensor<?x?xf64, #DCSR> to tensor<?x?xf64>
%0 = memref.buffer_cast %dm : memref<?x?xf64>
%0 = bufferization.to_memref %dm : memref<?x?xf64>
%1 = vector.transfer_read %0[%c0, %c0], %d0: memref<?x?xf64>, vector<4x8xf64>
vector.print %1 : vector<4x8xf64>
memref.dealloc %0 : memref<?x?xf64>
@@ -94,8 +94,8 @@ module {
scf.for %i = %c0 to %c4 step %c1 {
memref.store %i0, %xdata[%i] : memref<?xi32>
}
%b = memref.tensor_load %bdata : memref<?xi32>
%x = memref.tensor_load %xdata : memref<?xi32>
%b = bufferization.to_tensor %bdata : memref<?xi32>
%x = bufferization.to_tensor %xdata : memref<?xi32>

// Call kernel.
%0 = call @kernel_matvec(%a, %b, %x)
@@ -105,7 +105,7 @@ module {
//
// CHECK: ( 889, 1514, -21, -3431 )
//
%m = memref.buffer_cast %0 : memref<?xi32>
%m = bufferization.to_memref %0 : memref<?xi32>
%v = vector.transfer_read %m[%c0], %i0: memref<?xi32>, vector<4xi32>
vector.print %v : vector<4xi32>

@@ -100,7 +100,7 @@ module {
memref.store %k, %cdata[%i, %j] : memref<?x?xf64>
}
}
%c = memref.tensor_load %cdata : memref<?x?xf64>
%c = bufferization.to_tensor %cdata : memref<?x?xf64>

%ddata = memref.alloc(%c4, %c5) : memref<?x?xf64>
scf.for %i = %c0 to %c4 step %c1 {
@@ -112,15 +112,15 @@ module {
memref.store %k, %ddata[%i, %j] : memref<?x?xf64>
}
}
%d = memref.tensor_load %ddata : memref<?x?xf64>
%d = bufferization.to_tensor %ddata : memref<?x?xf64>

%adata = memref.alloc(%c2, %c5) : memref<?x?xf64>
scf.for %i = %c0 to %c2 step %c1 {
scf.for %j = %c0 to %c5 step %c1 {
memref.store %i0, %adata[%i, %j] : memref<?x?xf64>
}
}
%a = memref.tensor_load %adata : memref<?x?xf64>
%a = bufferization.to_tensor %adata : memref<?x?xf64>

// Call kernel.
%0 = call @kernel_mttkrp(%b, %c, %d, %a)
@@ -132,7 +132,7 @@ module {
// CHECK: ( ( 16075, 21930, 28505, 35800, 43815 ),
// CHECK: ( 10000, 14225, 19180, 24865, 31280 ) )
//
%m = memref.buffer_cast %0 : memref<?x?xf64>
%m = bufferization.to_memref %0 : memref<?x?xf64>
%v = vector.transfer_read %m[%c0, %c0], %i0
: memref<?x?xf64>, vector<2x5xf64>
vector.print %v : vector<2x5xf64>
@@ -81,7 +81,7 @@ module {
// CHECK-SAME: ( -254, 0, 256, -300, -30, -6 ),
// CHECK-SAME: ( 1397, 0, -1408, 100, 10, 33 ) )
//
%m = memref.buffer_cast %0 : memref<5x6xi32>
%m = bufferization.to_memref %0 : memref<5x6xi32>
%v = vector.transfer_read %m[%c0, %c0], %i0
: memref<5x6xi32>, vector<5x6xi32>
vector.print %v : vector<5x6xi32>
@@ -201,19 +201,19 @@ module {
// CHECK: 15
// CHECK: 10
//
%m0 = memref.buffer_cast %0 : memref<i32>
%m0 = bufferization.to_memref %0 : memref<i32>
call @dump_i32(%m0) : (memref<i32>) -> ()
%m1 = memref.buffer_cast %1 : memref<f32>
%m1 = bufferization.to_memref %1 : memref<f32>
call @dump_f32(%m1) : (memref<f32>) -> ()
%m2 = memref.buffer_cast %2 : memref<i32>
%m2 = bufferization.to_memref %2 : memref<i32>
call @dump_i32(%m2) : (memref<i32>) -> ()
%m3 = memref.buffer_cast %3 : memref<f32>
%m3 = bufferization.to_memref %3 : memref<f32>
call @dump_f32(%m3) : (memref<f32>) -> ()
%m4 = memref.buffer_cast %4 : memref<i32>
%m4 = bufferization.to_memref %4 : memref<i32>
call @dump_i32(%m4) : (memref<i32>) -> ()
%m5 = memref.buffer_cast %5 : memref<i32>
%m5 = bufferization.to_memref %5 : memref<i32>
call @dump_i32(%m5) : (memref<i32>) -> ()
%m6 = memref.buffer_cast %6 : memref<i32>
%m6 = bufferization.to_memref %6 : memref<i32>
call @dump_i32(%m6) : (memref<i32>) -> ()

// Release the resources.
@@ -97,9 +97,9 @@ module {
memref.store %d, %bdata[%j, %i] : memref<?x?xf32>
}
}
%a = memref.tensor_load %adata : memref<?x?xf32>
%b = memref.tensor_load %bdata : memref<?x?xf32>
%x = memref.tensor_load %xdata : memref<?x?xf32>
%a = bufferization.to_tensor %adata : memref<?x?xf32>
%b = bufferization.to_tensor %bdata : memref<?x?xf32>
%x = bufferization.to_tensor %xdata : memref<?x?xf32>

// Read the sparse matrix from file, construct sparse storage.
%fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
@@ -118,7 +118,7 @@ module {
// CHECK: ( 164, 0, 0, 640, 0 )
// CHECK: ( 0, 520, 0, 0, 1250 )
//
%r = memref.buffer_cast %0 : memref<?x?xf32>
%r = bufferization.to_memref %0 : memref<?x?xf32>
scf.for %i = %c0 to %c5 step %c1 {
%v = vector.transfer_read %r[%i, %c0], %d0: memref<?x?xf32>, vector<5xf32>
vector.print %v : vector<5xf32>
@@ -156,8 +156,8 @@ module {
// CHECK-SAME: ( 0, 0, 0, 0, 0, 0, 0, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 0 ),
// CHECK-SAME: ( 0, 0, 0, 0, 0, 0, 0, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 192 ) )
//
%m0 = memref.buffer_cast %0 : memref<8x8xf64>
%m1 = memref.buffer_cast %1 : memref<8x8xf64>
%m0 = bufferization.to_memref %0 : memref<8x8xf64>
%m1 = bufferization.to_memref %1 : memref<8x8xf64>
%v0 = vector.transfer_read %m0[%c0, %c0], %d0
: memref<8x8xf64>, vector<8x8xf64>
%v1 = vector.transfer_read %m1[%c0, %c0], %d0
@@ -97,8 +97,8 @@ module {
memref.store %i0, %xdata[%i, %j] : memref<?x?xf64>
}
}
%b = memref.tensor_load %bdata : memref<?x?xf64>
%x = memref.tensor_load %xdata : memref<?x?xf64>
%b = bufferization.to_tensor %bdata : memref<?x?xf64>
%x = bufferization.to_tensor %xdata : memref<?x?xf64>

// Call kernel.
%0 = call @kernel_spmm(%a, %b, %x)
@@ -108,7 +108,7 @@ module {
//
// CHECK: ( ( 3548, 3550, 3552, 3554 ), ( 6052, 6053, 6054, 6055 ), ( -56, -63, -70, -77 ), ( -13704, -13709, -13714, -13719 ) )
//
%m = memref.buffer_cast %0 : memref<?x?xf64>
%m = bufferization.to_memref %0 : memref<?x?xf64>
%v = vector.transfer_read %m[%c0, %c0], %i0: memref<?x?xf64>, vector<4x4xf64>
vector.print %v : vector<4x4xf64>

@@ -73,7 +73,7 @@ module {
// initialized to zero.
%xdata = memref.alloc() : memref<f64>
memref.store %d0, %xdata[] : memref<f64>
%x = memref.tensor_load %xdata : memref<f64>
%x = bufferization.to_tensor %xdata : memref<f64>

// Read the sparse matrix from file, construct sparse storage.
%fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
@@ -87,7 +87,7 @@ module {
//
// CHECK: 30.2
//
%m = memref.buffer_cast %0 : memref<f64>
%m = bufferization.to_memref %0 : memref<f64>
%v = memref.load %m[] : memref<f64>
vector.print %v : f64

@@ -154,7 +154,7 @@ module {
vector.print %1 : vector<16xf64>
// Dump the dense vector to verify structure is correct.
%dv = sparse_tensor.convert %arg0 : tensor<?xf64, #SparseVector> to tensor<?xf64>
%2 = memref.buffer_cast %dv : memref<?xf64>
%2 = bufferization.to_memref %dv : memref<?xf64>
%3 = vector.transfer_read %2[%c0], %d0: memref<?xf64>, vector<32xf64>
vector.print %3 : vector<32xf64>
memref.dealloc %2 : memref<?xf64>
@@ -181,7 +181,7 @@ module {
// Setup memory for a single reduction scalar.
%xdata = memref.alloc() : memref<f64>
memref.store %d1, %xdata[] : memref<f64>
%x = memref.tensor_load %xdata : memref<f64>
%x = bufferization.to_tensor %xdata : memref<f64>

// Call sparse vector kernels.
%0 = call @vector_scale(%sv1)
@@ -228,7 +228,7 @@ module {
%m4 = sparse_tensor.values %4 : tensor<?xf64, #DenseVector> to memref<?xf64>
%v4 = vector.load %m4[%c0]: memref<?xf64>, vector<32xf64>
vector.print %v4 : vector<32xf64>
%m5 = memref.buffer_cast %5 : memref<f64>
%m5 = bufferization.to_memref %5 : memref<f64>
%v5 = memref.load %m5[] : memref<f64>
vector.print %v5 : f64
