diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index 7b6256050e772..57d126603ebd7 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -2714,8 +2714,8 @@ FailureOr<SmallVector<Value>> SoftmaxOp::decomposeOperation(OpBuilder &b) {
   Value neutralForMaxFInit =
       b.create<linalg::FillOp>(loc, Value{neutralForMaxF}, outputReduce)
           .result();
-  Value max = reduce<arith::MaximumFOp>(b, loc, input, neutralForMaxFInit,
-                                        reductionDim);
+  Value max =
+      reduce<arith::MaxNumFOp>(b, loc, input, neutralForMaxFInit, reductionDim);
 
   // Step 2: Subtract max from input and exponentiate.
   Value numerator = buildSubAndExpOp(b, loc, input, max, output, reductionDim);
diff --git a/mlir/test/Dialect/Linalg/transform-op-decompose.mlir b/mlir/test/Dialect/Linalg/transform-op-decompose.mlir
index ef0aca2cc366f..2e211d2fa7dbe 100644
--- a/mlir/test/Dialect/Linalg/transform-op-decompose.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-decompose.mlir
@@ -215,7 +215,7 @@ func.func @softmax(%arg0: tensor<2x16x32xf32>, %dst: tensor<2x16x32xf32>) -> tensor<2x16x32xf32> {
 // CHECK: %[[D3:.+]] = linalg.generic {indexing_maps = [#[[$MAP]], #[[$MAP1]]], iterator_types = ["parallel",
 // CHECK-SAME: "parallel", "reduction"]} ins(%[[ARG0]] : tensor<2x16x32xf32>) outs(%[[D2]] : tensor<2x16xf32>) {
 // CHECK: ^bb0(%[[IN:.+]]: f32, %[[OUT:.+]]: f32):
-// CHECK: %[[D8:.+]] = arith.maximumf %[[IN]], %[[OUT]] : f32
+// CHECK: %[[D8:.+]] = arith.maxnumf %[[IN]], %[[OUT]] : f32
 // CHECK: linalg.yield %[[D8]] : f32
 // CHECK: } -> tensor<2x16xf32>
 // CHECK: %[[D4:.+]] = linalg.generic {indexing_maps = [#[[$MAP]], #[[$MAP1]], #[[$MAP]]], iterator_types =
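
Note for context (not part of the patch): in the `arith` dialect, `arith.maximumf` implements IEEE-754 `maximum` and propagates NaN, whereas `arith.maxnumf` implements IEEE-754 `maximumNumber` and returns the other operand when exactly one input is NaN. A minimal standalone sketch of the difference; the function name and NaN constant below are hypothetical illustrations, not taken from this patch:

```mlir
// Hypothetical example contrasting the two max ops on a NaN input.
func.func @max_semantics(%x: f32) -> (f32, f32) {
  // Quiet-NaN bit pattern, written as a hexadecimal float literal.
  %nan = arith.constant 0x7FC00000 : f32
  // NaN-propagating (IEEE-754 maximum): %a is NaN because one input is NaN.
  %a = arith.maximumf %x, %nan : f32
  // NaN-ignoring (IEEE-754 maximumNumber): %b is %x, the non-NaN operand.
  %b = arith.maxnumf %x, %nan : f32
  return %a, %b : f32, f32
}
```

This is why the FileCheck update mirrors the C++ change: the reduction body generated by the decomposition now combines elements with `arith.maxnumf` rather than `arith.maximumf`.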