diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
index f39b6d0df1707..d80ba8ff56fd5 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
@@ -243,9 +243,7 @@ struct BufferizationOptions {
   /// additional buffer allocs and copies because layout maps cannot be casted
   /// away.
   ///
-  /// If `bufferizeFunctionBoundaries` is not set, this flag has no effect. If
-  /// `promoteBufferResultsToOutParams` is set, `kInferMostPreciseLayoutMap` is
-  /// is an invalid option.
+  /// If `bufferizeFunctionBoundaries` is not set, this flag has no effect.
   ///
   /// Note: Inferred layout maps may not be desireable when interacting with
   /// external functions, because the generated function signatures will be less
@@ -285,10 +283,6 @@ struct BufferizationOptions {
   /// For debugging only. Should be used together with `testAnalysisOnly`.
   bool printConflicts = false;
 
-  /// If set to `true`, buffers that are returned from functions are replaced
-  /// with buffer "out" parameters. At the call site, new buffers are allocated.
-  bool promoteBufferResultsToOutParams = false;
-
   /// If set to `true`, an `getAliasingOpResult` will return the corresponding
   /// "out"/"dest" OpOperand for every op that has the notion of an "out"/"dest"
   /// operand. I.e., the aliasing OpOperand of the i-th tensor OpResult is
diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Bufferize.h b/mlir/include/mlir/Dialect/Bufferization/Transforms/Bufferize.h
index bb39073ae379a..faa94b437458d 100644
--- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Bufferize.h
+++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Bufferize.h
@@ -88,10 +88,6 @@ BufferizationOptions getPartialBufferizationOptions();
 LogicalResult bufferizeOp(Operation *op, BufferizationState &bufferizationState,
                           const OpFilter *opFilter = nullptr);
 
-/// Finalize all buffer allocations: Create alloc/dealloc ops as specified by
-/// the bufferization options.
-LogicalResult finalizeBuffers(Operation *op,
-                              const BufferizationOptions &options);
 } // namespace bufferization
 } // namespace mlir
 
diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
index 5c84e6eb5bf8c..b72780c965402 100644
--- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
@@ -226,10 +226,6 @@ def OneShotBufferize : Pass<"one-shot-bufferize", "ModuleOp"> {
       argument is modified in-place.
     * Returning non-equivalent tensors is forbidden by default and must be
      explicitly activated with `allow-return-allocs`.
-    * Non-equivalent returned tensors of fully static size can be promoted to
-      function arguments with `promote-buffer-results-to-out-params`. In that
-      case, buffers for such tensors are allocated at each call site. Instead of
-      returning a buffer, the buffer contents are copied into these allocations.
     * External functions (without bodies) that return a tensor are not
       supported.
     * Function with multiple blocks or multiple ReturnOps are not supported.
@@ -294,9 +290,6 @@ def OneShotBufferize : Pass<"one-shot-bufferize", "ModuleOp"> {
            /*default=*/"false",
            "Test only: Annotate IR with RaW conflicts. Requires "
            "test-analysis-only.">,
-    Option<"promoteBufferResultsToOutParams",
-           "promote-buffer-results-to-out-params", "bool", /*default=*/"false",
-           "Replace returned buffers (that were not dropped) with out params.">,
     Option<"unknownTypeConversion", "unknown-type-conversion", "std::string",
            /*default=*/"\"fully-dynamic-layout-map\"",
            "Controls layout maps for non-inferrable memref types.">,
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
index 2de4b417357c8..a2b6ad8029bee 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
@@ -191,7 +191,6 @@ struct OneShotBufferizePass
     opt.printConflicts = printConflicts;
     opt.testAnalysisOnly = testAnalysisOnly;
     opt.bufferizeFunctionBoundaries = bufferizeFunctionBoundaries;
-    opt.promoteBufferResultsToOutParams = promoteBufferResultsToOutParams;
     opt.unknownTypeConversion = parseLayoutMapOption(unknownTypeConversion);
 
     OpFilter::Entry::FilterFn filterFn =
@@ -291,18 +290,6 @@ static bool hasTensorSemantics(Operation *op) {
   return hasTensorResult || hasTensorOperand;
 }
 
-LogicalResult
-bufferization::finalizeBuffers(Operation *op,
-                               const BufferizationOptions &options) {
-  // Promote returned buffers to "out" parameters.
-  // TODO: Pass options to support custom dealloc ops.
-  if (options.promoteBufferResultsToOutParams && isa<ModuleOp>(op) &&
-      failed(promoteBufferResultsToOutParams(cast<ModuleOp>(op))))
-    return failure();
-
-  return success();
-}
-
 LogicalResult bufferization::bufferizeOp(Operation *op,
                                          const AnalysisState &analysisState) {
   // Catch incorrect API usage.
@@ -314,8 +301,6 @@ LogicalResult bufferization::bufferizeOp(Operation *op,
   BufferizationState bufferizationState(analysisState);
   if (failed(bufferizeOp(op, bufferizationState)))
     return failure();
-  if (failed(finalizeBuffers(op, analysisState.getOptions())))
-    return failure();
   return success();
 }
 
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp b/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
index f00097064cc02..df89f682fae38 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
@@ -472,10 +472,6 @@ LogicalResult mlir::bufferization::bufferizeModuleOp(
     }
   }
 
-  // Finalize all buffers.
-  if (failed(finalizeBuffers(moduleOp, options)))
-    return failure();
-
   // Post-pass cleanup of function argument attributes.
   moduleOp.walk([&](func::FuncOp op) {
     for (BlockArgument bbArg : op.getArguments())
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir
index 288710df98bcd..884f7c475eb77 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir
@@ -1,5 +1,5 @@
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs promote-buffer-results-to-out-params function-boundary-type-conversion=fully-dynamic-layout-map" -buffer-deallocation -split-input-file | FileCheck %s
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs promote-buffer-results-to-out-params function-boundary-type-conversion=identity-layout-map" -buffer-deallocation -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=fully-dynamic-layout-map" -buffer-results-to-out-params -buffer-deallocation -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=identity-layout-map" -buffer-results-to-out-params -buffer-deallocation -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT
 // RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=infer-layout-map" -buffer-deallocation -split-input-file | FileCheck %s --check-prefix=CHECK-BASELINE
 
 // Note: function-boundary-type-conversion=infer-layout-map with
@@ -17,7 +17,8 @@
 // CHECK:   %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32>
 // CHECK:   memref.copy %[[arg0]], %[[alloc]]
 // CHECK:   memref.store %{{.*}}, %[[alloc]]
-// CHECK:   memref.copy %[[alloc]], %[[arg1]]
+// CHECK:   %[[casted:.*]] = memref.cast %[[alloc]]
+// CHECK:   memref.copy %[[casted]], %[[arg1]]
 // CHECK:   memref.dealloc %[[alloc]]
 // CHECK:   return
 // CHECK: }
@@ -53,7 +54,7 @@ func.func @callee(%t: tensor<5xf32>) -> (tensor<5xf32>, tensor<5xf32>) {
 // CHECK:   %[[casted:.*]] = memref.cast %[[alloc]] : memref<5xf32> to memref<5xf32, #[[$map1]]>
 // CHECK:   call @callee(%[[arg0]], %[[casted]])
 // CHECK:   %[[l1:.*]] = memref.load %[[arg0]]
-// CHECK:   %[[l2:.*]] = memref.load %[[alloc]]
+// CHECK:   %[[l2:.*]] = memref.load %[[casted]]
 // CHECK:   memref.dealloc %[[alloc]]
 // CHECK:   return %[[l1]], %[[l2]]
 // CHECK: }
@@ -79,7 +80,8 @@ func.func @main(%t: tensor<5xf32>) -> (f32, f32) {
 // CHECK-SAME:     %[[r:.*]]: memref<2x5xf32, #[[$map2a]]>) {
 // CHECK:   %[[alloc:.*]] = memref.alloc() {{.*}} : memref<10x20xf32>
 // CHECK:   %[[subview:.*]] = memref.subview %[[alloc]]{{.*}} : memref<10x20xf32> to memref<2x5xf32, #[[$map2b]]>
-// CHECK:   memref.copy %[[subview]], %[[r]]
+// CHECK:   %[[casted:.*]] = memref.cast %[[subview]]
+// CHECK:   memref.copy %[[casted]], %[[r]]
 // CHECK:   memref.dealloc %[[alloc]]
 
 // CHECK-NO-LAYOUT-LABEL: func @callee(
@@ -112,7 +114,7 @@ func.func @callee(%idx: index) -> tensor<2x5xf32> {
 // CHECK:   %[[alloc:.*]] = memref.alloc() : memref<2x5xf32>
 // CHECK:   %[[casted:.*]] = memref.cast %[[alloc]] : memref<2x5xf32> to memref<2x5xf32, #[[$map2a]]>
 // CHECK:   call @callee(%{{.*}}, %[[casted]])
-// CHECK:   memref.load %[[alloc]]
+// CHECK:   memref.load %[[casted]]
 // CHECK:   memref.dealloc %[[alloc]]
 
 // CHECK-NO-LAYOUT: func @main(
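
For reference, the updated RUN lines above show the migration path: the out-params rewrite no longer runs inside One-Shot Bufferize but as the separate -buffer-results-to-out-params pass afterwards. A minimal sketch of such an invocation outside of a test setting (the input file name input.mlir is a placeholder, not taken from the patch):

  mlir-opt input.mlir \
    -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=fully-dynamic-layout-map" \
    -buffer-results-to-out-params \
    -buffer-deallocation

As the removed option documentation describes, the rewrite replaces returned buffers with "out" arguments: each call site allocates the result buffer and the callee copies its result into that buffer instead of returning it.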