Skip to content

Commit

Permalink
Fix handling of rank-1 tensors in tosa.reduce_sum
Browse files Browse the repository at this point in the history
The conversion of `tosa.reduce_sum` to linalg creates a
`linalg.generic` op that produces a tensor of rank `input_rank -
1`. This tensor is then expanded back into a tensor of rank
`input_rank`. In the case where the tensor being expanded is rank-0,
the reassociation map used must be empty. However, the current
implementation indexes and modifies the reassociation map independently
of the rank of the tensor being expanded, resulting in out-of-bounds
indexing when the tensor being expanded is rank-0. This commit adds a
guard to the reassociation map indexing.

Reviewed By: jpienaar

Differential Revision: https://reviews.llvm.org/D135828
  • Loading branch information
ramiro050 authored and jpienaar committed Oct 13, 2022
1 parent 928c23d commit b4c8c49
Show file tree
Hide file tree
Showing 2 changed files with 26 additions and 3 deletions.
9 changes: 6 additions & 3 deletions mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -825,9 +825,12 @@ static LogicalResult reduceMatchAndRewriteHelper(Operation *op, uint64_t axis,
int32_t dimToPush = i > axis ? i + 1 : i;
reassociationMap[i].push_back(rewriter.getAffineDimExpr(dimToPush));
}
int32_t expandedDim = axis < expandInputRank ? axis : expandInputRank - 1;
reassociationMap[expandedDim].push_back(
rewriter.getAffineDimExpr(expandedDim + 1));

if (expandInputRank != 0) {
int32_t expandedDim = axis < expandInputRank ? axis : expandInputRank - 1;
reassociationMap[expandedDim].push_back(
rewriter.getAffineDimExpr(expandedDim + 1));
}

rewriter.replaceOpWithNewOp<tensor::ExpandShapeOp>(
op, resultTy, linalgOp.getResults()[0], reassociationMap);
Expand Down
20 changes: 20 additions & 0 deletions mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -777,6 +777,26 @@ func.func @reduce_float_dyn(%arg0: tensor<?x5x4xf32>) -> () {

// -----

// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0) -> (d0)>
// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0) -> ()>

// CHECK-LABEL: @reduce_float_dyn_rank_1
// CHECK-SAME: %[[ARG0:[0-9a-zA-Z_]*]]: tensor<?xf32>
func.func @reduce_float_dyn_rank_1(%arg0: tensor<?xf32>) -> () {
// Regression test: reducing a rank-1 input yields a rank-0 intermediate
// tensor<f32>; re-expanding it to the rank-1 result must use an EMPTY
// reassociation map ([]) instead of indexing into a nonexistent dimension.
// CHECK-DAG: %[[INIT:.+]] = tensor.empty() : tensor<f32>
// CHECK-DAG: %[[CST0:.+]] = arith.constant 0.0
// CHECK: %[[FILL:.+]] = linalg.fill ins(%[[CST0]]{{.*}}outs(%[[INIT]]
// CHECK: %[[GENERIC:.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["reduction"]} ins(%[[ARG0]] : tensor<?xf32>) outs(%[[FILL]] : tensor<f32>)
// CHECK: ^bb0(%[[ARG1:.*]]: f32, %[[ARG2:.*]]: f32)
// CHECK: %[[RES:.+]] = arith.addf %[[ARG1]], %[[ARG2]] : f32
// CHECK: linalg.yield %[[RES]] : f32
// The empty reassociation "[]" below is the behavior under test.
// CHECK: tensor.expand_shape %[[GENERIC]] {{\[}}] : tensor<f32> into tensor<1xf32>
%0 = "tosa.reduce_sum"(%arg0) {axis = 0 : i64} : (tensor<?xf32>) -> tensor<1xf32>
return
}

// -----

// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>

Expand Down

0 comments on commit b4c8c49

Please sign in to comment.