18 changes: 0 additions & 18 deletions mlir/CMakeLists.txt
@@ -217,24 +217,6 @@ set(MLIR_PDLL_TABLEGEN_TARGET "${MLIR_PDLL_TABLEGEN_TARGET}" CACHE INTERNAL "")
set(MLIR_SRC_SHARDER_TABLEGEN_EXE "${MLIR_SRC_SHARDER_TABLEGEN_EXE}" CACHE INTERNAL "")
set(MLIR_SRC_SHARDER_TABLEGEN_TARGET "${MLIR_SRC_SHARDER_TABLEGEN_TARGET}" CACHE INTERNAL "")

# XeGPU Dialect Option (Default OFF)
option(MLIR_DIALECT_XEGPU_ENABLE
"Enable the XeGPU dialect."
OFF)

if(MLIR_DIALECT_XEGPU_ENABLE)
add_compile_definitions(MLIR_DIALECT_XEGPU_ENABLE)
endif()

# TosaToTensor Conversion Option (Default OFF)
option(MLIR_CONVERSION_TOSATOTENSOR_ENABLE
"Enable TosaToTensor conversion"
OFF)

if(MLIR_CONVERSION_TOSATOTENSOR_ENABLE)
add_compile_definitions(MLIR_CONVERSION_TOSATOTENSOR_ENABLE)
endif()

add_subdirectory(include/mlir)
add_subdirectory(lib)
# C API needs all dialects for registration, but should be built before tests.
14 changes: 7 additions & 7 deletions mlir/docs/Bufferization.md
@@ -202,13 +202,13 @@ e.g.:
%2 = "my_dialect.yet_another_op"(%0) : (tensor<?xf32>) -> (tensor<?xf32>)
```

## Tensor / MemRef Boundary
## Tensor / Buffer Boundary

The bufferization dialect provides a few helper ops to connect tensor IR (that
should be bufferized) with existing buffers (that may be allocated/provided by
a different runtime/library/etc.).

`bufferization.to_memref %t` returns the future buffer of a tensor SSA value.
`bufferization.to_buffer %t` returns the future buffer of a tensor SSA value.
`bufferization.to_tensor %m` returns a tensor SSA value for a given MemRef
buffer. `bufferization.materialize_in_destination` indicates that a tensor value
should materialize in a certain buffer.
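
For illustration, here is a minimal sketch of how these ops connect an existing
buffer with tensor IR. The buffer `%m`, the placeholder op, and the single-type
syntax mirror the examples further below and are assumptions for this sketch,
not prescribed forms:

```mlir
// %m is an existing buffer, e.g. allocated by an external runtime or library.
// Wrap it so that IR with tensor semantics can consume it.
%t = bufferization.to_tensor %m : memref<?xf32>

// Some tensor-level computation (placeholder op for illustration).
%t2 = "my_dialect.some_op"(%t) : (tensor<?xf32>) -> (tensor<?xf32>)

// Retrieve the future buffer of the tensor SSA value %t2.
%b = bufferization.to_buffer %t2 : memref<?xf32>
```
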
@@ -268,7 +268,7 @@ By default, One-Shot Bufferize fails when it encounters an op with tensor
semantics (i.e., tensor result or tensor operand) that is not bufferizable
(i.e., does not implement `BufferizableOpInterface`). This can be avoided with
`allow-unknown-ops`. In that case, One-Shot Bufferize inserts
`to_memref`/`to_tensor` ops around the bufferization boundary.
`to_buffer`/`to_tensor` ops around the bufferization boundary.

One-Shot Bufferize can be configured to bufferize only ops from a set of
dialects with `dialect-filter`.
@@ -291,7 +291,7 @@ memref. The layout map of the memref type can be controlled with

One-Shot Bufferize bufferizes ops from top to bottom. This works well when all
ops are bufferizable. However, when encountering a non-bufferizable tensor with
`allow-unknown-ops`, One-Shot Bufferize must insert `to_memref` ops at the
`allow-unknown-ops`, One-Shot Bufferize must insert `to_buffer` ops at the
bufferization boundary and decide on a memref type. By default, One-Shot
Bufferize chooses the most dynamic memref type wrt. layout maps. E.g.:

@@ -300,12 +300,12 @@ Bufferize choose the most dynamic memref type wrt. layout maps. E.g.:
%1 = tensor.extract %0[%idx1, %idx2] : tensor<?x?xf32>
```

When bufferizing the above IR, One-Shot Bufferize inserts a `to_memref` op with
When bufferizing the above IR, One-Shot Bufferize inserts a `to_buffer` op with
dynamic offset and strides:

```mlir
%0 = "my_dialect.unbufferizable_op(%t) : (tensor<?x?xf32>) -> (tensor<?x?xf32>)
%0_m = bufferization.to_memref %0 : memref<?x?xf32, strided<[?, ?], offset: ?>>
%0_m = bufferization.to_buffer %0 : memref<?x?xf32, strided<[?, ?], offset: ?>>
%1 = memref.load %0_m[%idx1, %idx2] : memref<?x?xf32, strided<[?, ?], offset: ?>>
```

@@ -335,7 +335,7 @@ generation of layout maps when no precise layout can be inferred:
* `identity-layout-map` uses static identity layout maps. This option can be
useful for legacy code that cannot handle memref types with layout maps.
Note that this setting can lead to additional buffer copies when folding a
`to_tensor`/`to_memref` pair with memref types that are not cast-compatible.
`to_tensor`/`to_buffer` pair with memref types that are not cast-compatible.

Note: The `unknown-type-conversion` option does not affect layout maps of
function signatures. There is a separate `function-signature-type-conversion`
6 changes: 0 additions & 6 deletions mlir/include/mlir/Conversion/CMakeLists.txt
@@ -7,10 +7,4 @@ add_public_tablegen_target(MLIRConversionPassIncGen)

add_mlir_doc(Passes ConversionPasses ./ -gen-pass-doc)

if(MLIR_CONVERSION_TOSATOTENSOR_ENABLE)
add_subdirectory(TosaToTensor)
endif()
if(MLIR_DIALECT_XEGPU_ENABLE)
add_subdirectory(VectorToXeGPU)
endif()
add_subdirectory(ConvertToLLVM)
4 changes: 0 additions & 4 deletions mlir/include/mlir/Conversion/Passes.h
@@ -71,19 +71,15 @@
#include "mlir/Conversion/TosaToLinalg/TosaToLinalg.h"
#include "mlir/Conversion/TosaToMLProgram/TosaToMLProgram.h"
#include "mlir/Conversion/TosaToSCF/TosaToSCF.h"
#ifdef MLIR_CONVERSION_TOSATOTENSOR_ENABLE
#include "mlir/Conversion/TosaToTensor/TosaToTensor.h"
#endif
#include "mlir/Conversion/UBToLLVM/UBToLLVM.h"
#include "mlir/Conversion/UBToSPIRV/UBToSPIRV.h"
#include "mlir/Conversion/VectorToArmSME/VectorToArmSME.h"
#include "mlir/Conversion/VectorToGPU/VectorToGPU.h"
#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.h"
#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"
#include "mlir/Conversion/VectorToSPIRV/VectorToSPIRVPass.h"
#ifdef MLIR_DIALECT_XEGPU_ENABLE
#include "mlir/Conversion/VectorToXeGPU/VectorToXeGPU.h"
#endif

namespace mlir {

5 changes: 1 addition & 4 deletions mlir/include/mlir/Conversion/Passes.td
@@ -1259,7 +1259,7 @@ def TosaToSCF : Pass<"tosa-to-scf"> {
//===----------------------------------------------------------------------===//
// TosaToTensor
//===----------------------------------------------------------------------===//
#ifdef MLIR_CONVERSION_TOSATOTENSOR_ENABLE

def TosaToTensor : Pass<"tosa-to-tensor"> {
let summary = "Lower TOSA to the Tensor dialect";
let dependentDialects = [
@@ -1272,7 +1272,6 @@ def TosaToTensor : Pass<"tosa-to-tensor"> {

let constructor = "tosa::createTosaToTensor()";
}
#endif

//===----------------------------------------------------------------------===//
// UBToLLVM
@@ -1465,7 +1464,6 @@ def ConvertVectorToSPIRV : Pass<"convert-vector-to-spirv"> {
// VectorToXeGPU
//===----------------------------------------------------------------------===//

#ifdef MLIR_DIALECT_XEGPU_ENABLE
def ConvertVectorToXeGPU : Pass<"convert-vector-to-xegpu"> {
let summary = "Lower the operations from the vector dialect into the XeGPU "
"dialect";
@@ -1475,6 +1473,5 @@ def ConvertVectorToXeGPU : Pass<"convert-vector-to-xegpu"> {
"vector::VectorDialect", "xegpu::XeGPUDialect"
];
}
#endif

#endif // MLIR_CONVERSION_PASSES
7 changes: 0 additions & 7 deletions mlir/include/mlir/Conversion/TosaToTensor/CMakeLists.txt

This file was deleted.

Original file line number Diff line number Diff line change
@@ -17,6 +17,7 @@
#include <optional>

#include "mlir/Dialect/Bufferization/IR/BufferizationEnums.h.inc"
#include "mlir/Dialect/Bufferization/IR/BufferizationTypeInterfaces.h"

namespace mlir {
class OpBuilder;
@@ -259,15 +260,15 @@ struct BufferizationOptions {
std::function<LogicalResult(OpBuilder &, Location, Value, Value)>;
/// Initializer function for analysis state.
using AnalysisStateInitFn = std::function<void(AnalysisState &)>;
/// Tensor -> MemRef type converter.
/// Parameters: tensor type, memory space, func op, bufferization options
/// Tensor-like -> Buffer-like type conversion.
/// Parameters: tensor-like type, memory space, func op, bufferization options
using FunctionArgTypeConverterFn =
std::function<BaseMemRefType(TensorType, Attribute memorySpace,
std::function<BufferLikeType(TensorLikeType, Attribute memorySpace,
func::FuncOp, const BufferizationOptions &)>;
/// Tensor -> MemRef type converter.
/// Parameters: Value, memory space, bufferization options
/// Tensor -> MemRef type conversion.
/// Parameters: tensor type, memory space, bufferization options
using UnknownTypeConverterFn = std::function<BaseMemRefType(
Value, Attribute memorySpace, const BufferizationOptions &)>;
TensorType, Attribute memorySpace, const BufferizationOptions &)>;
// Produce a MemorySpace attribute from a tensor type
using DefaultMemorySpaceFn =
std::function<std::optional<Attribute>(TensorType t)>;
@@ -302,7 +303,7 @@ struct BufferizationOptions {
Value to) const;

/// Specifies whether not bufferizable ops are allowed in the input. If so,
/// bufferization.to_memref and bufferization.to_tensor ops are inserted at
/// bufferization.to_buffer and bufferization.to_tensor ops are inserted at
/// the boundaries.
bool allowUnknownOps = false;

@@ -344,10 +345,12 @@ struct BufferizationOptions {
/// predictable.
void setFunctionBoundaryTypeConversion(LayoutMapOption layoutMapOption);

/// Type converter from tensors to memrefs. This type converter is used to
/// determine bufferized function argument and result types. By default, a
/// type converter that returns a memref type with a fully dynamic layout map
/// is used.
/// Type conversion from tensors to buffers. This type conversion is used to
/// determine bufferized function argument and result types.
///
/// By default, if the tensor is a (builtin) tensor type, it is converted to a
/// memref type with a fully dynamic layout map; if it is a (generic)
/// tensor-like type, it is converted using TensorLikeType::getBufferType().
///
/// If `bufferizeFunctionBoundaries` is not set, this function isn't used.
FunctionArgTypeConverterFn functionArgTypeConverterFn = nullptr;
@@ -359,10 +362,9 @@ struct BufferizationOptions {
/// If `bufferizeFunctionBoundaries` is not set, this flag has no effect.
bool inferFunctionResultLayout = true;

/// Type converter from tensors to memrefs. This type converter is used if no
/// memref type could be inferred during bufferization. By default, a type
/// converter that returns a memref type with a fully dynamic layout map is
/// used.
/// Type conversion from tensors to memrefs. This type conversion is used if
/// no memref type could be inferred during bufferization. By default, it
/// returns a memref type with a fully dynamic layout map.
UnknownTypeConverterFn unknownTypeConverterFn = nullptr;

// Use during type conversion to determine the memory space for memref based
@@ -587,7 +589,7 @@ allocateTensorForShapedValue(OpBuilder &b, Location loc, Value shapedValue,
bool copy = true);

/// Lookup the buffer for the given value. If the value was not bufferized
/// yet, wrap it in a ToMemrefOp. Otherwise, it is the result of a ToTensorOp,
/// yet, wrap it in a ToBufferOp. Otherwise, it is the result of a ToTensorOp,
/// from which the memref operand is returned.
FailureOr<Value> getBuffer(RewriterBase &rewriter, Value value,
const BufferizationOptions &options);
@@ -600,7 +602,7 @@ FailureOr<Value> getBuffer(RewriterBase &rewriter, Value value,
/// IR, this function can be used.
///
/// This function is a wrapper around BufferizableOpInterface::getBufferType.
FailureOr<BaseMemRefType> getBufferType(Value value,
FailureOr<BufferLikeType> getBufferType(Value value,
const BufferizationOptions &options);

/// Return the buffer type for a given Value (tensor) after bufferization
@@ -613,7 +615,7 @@ FailureOr<BaseMemRefType> getBufferType(Value value,
/// IR, this function can be used.
///
/// This function is a wrapper around `BufferizableOpInterface::getBufferType`.
FailureOr<BaseMemRefType> getBufferType(Value value,
FailureOr<BufferLikeType> getBufferType(Value value,
const BufferizationOptions &options,
SmallVector<Value> &invocationStack);

@@ -638,7 +640,7 @@ OpTy replaceOpWithNewBufferizedOp(RewriterBase &rewriter, Operation *op,
return newOp;
}

/// Return a MemRefType to which the type of the given value can be bufferized.
/// Return a MemRefType to which the TensorType can be bufferized.
///
/// If possible, op bufferization implementations should not use this function
/// and instead infer precise memref types for tensor results by themselves.
@@ -650,7 +652,8 @@ OpTy replaceOpWithNewBufferizedOp(RewriterBase &rewriter, Operation *op,
/// Note: Canonicalization patterns could clean up layout maps and infer more
/// precise layout maps after bufferization. However, many possible
/// canonicalizations are currently not implemented.
BaseMemRefType getMemRefType(Value value, const BufferizationOptions &options,
BaseMemRefType getMemRefType(TensorType tensorType,
const BufferizationOptions &options,
MemRefLayoutAttrInterface layout = {},
Attribute memorySpace = nullptr);

@@ -693,7 +696,7 @@ AliasingOpOperandList defaultGetAliasingOpOperands(Value value,
/// This is the default implementation of
/// BufferizableOpInterface::getBufferType. Should not be called from other
/// places.
FailureOr<BaseMemRefType>
FailureOr<BufferLikeType>
defaultGetBufferType(Value value, const BufferizationOptions &options,
SmallVector<Value> &invocationStack);

@@ -720,6 +723,19 @@ AliasingValueList unknownGetAliasingValues(OpOperand &opOperand);
/// This is the default implementation of
/// BufferizableOpInterface::hasTensorSemantics
bool defaultHasTensorSemantics(Operation *op);

/// This is a helper function used when the buffer type is guaranteed to be a
/// memref. It performs two actions: failure-state checking and an explicit
/// llvm::cast<> from the buffer-like type interface to a BaseMemRefType. This
/// allows easier management of differences in C++ types at the API boundaries.
/// A valid buffer type is cast to the memref type; otherwise, the failure
/// state is propagated, i.e. asMemRefType(mlir::failure()) returns
/// mlir::failure().
FailureOr<BaseMemRefType> asMemRefType(FailureOr<BufferLikeType> bufferType);

/// This function is a free-standing helper that relies on
/// bufferization::TensorLikeTypeInterface to verify that the types in the
/// tensor and buffer worlds match.
bool typesMatchAfterBufferization(Operation &op, Value tensor, Value buffer);
} // namespace detail

} // namespace bufferization
Original file line number Diff line number Diff line change
@@ -518,7 +518,7 @@ def BufferizableOpInterface : OpInterface<"BufferizableOpInterface"> {
Note: This interface method should never be called directly from user
code. Always use `bufferization::getBufferType`.
}],
/*retType=*/"::mlir::FailureOr<::mlir::BaseMemRefType>",
/*retType=*/"::mlir::FailureOr<::mlir::bufferization::BufferLikeType>",
/*methodName=*/"getBufferType",
/*args=*/(ins "::mlir::Value":$value,
"const ::mlir::bufferization::BufferizationOptions &":$options,
8 changes: 4 additions & 4 deletions mlir/include/mlir/Dialect/Bufferization/IR/Bufferization.h
@@ -56,10 +56,10 @@ FailureOr<Value> castOrReallocMemRefValue(OpBuilder &b, Value value,
MemRefType type,
const BufferizationOptions &options);

/// Try to fold to_memref(to_tensor(x)). If x's type and the result type of the
/// to_memref op are different, a memref.cast is needed.
LogicalResult foldToMemrefToTensorPair(RewriterBase &rewriter,
ToMemrefOp toMemref,
/// Try to fold to_buffer(to_tensor(x)). If x's type and the result type of the
/// to_buffer op are different, a memref.cast is needed.
LogicalResult foldToBufferToTensorPair(RewriterBase &rewriter,
ToBufferOp toBuffer,
const BufferizationOptions &options);
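
For reference, a minimal sketch of the IR shape this fold targets; the SSA
names, types, and single-type op syntax follow the simplified form used in
Bufferization.md above and are assumptions, not the exact printed form:

```mlir
// Before folding: a to_buffer(to_tensor(x)) pair where the requested memref
// type differs from the type of the underlying buffer %m only in its layout.
%t  = bufferization.to_tensor %m : memref<?xf32>
%m2 = bufferization.to_buffer %t : memref<?xf32, strided<[?], offset: ?>>
```

Assuming the two memref types are cast-compatible (as they are here), the pair
folds to a cast of the original buffer; otherwise a reallocation and copy is
needed instead:

```mlir
// After folding: only a cast of the original buffer remains.
%m2 = memref.cast %m : memref<?xf32> to memref<?xf32, strided<[?], offset: ?>>
```
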

/// Add the canonicalization patterns for bufferization.dealloc to the given