diff --git a/mlir/CMakeLists.txt b/mlir/CMakeLists.txt index e7bd52c150d6..9e786154a2b4 100644 --- a/mlir/CMakeLists.txt +++ b/mlir/CMakeLists.txt @@ -217,24 +217,6 @@ set(MLIR_PDLL_TABLEGEN_TARGET "${MLIR_PDLL_TABLEGEN_TARGET}" CACHE INTERNAL "") set(MLIR_SRC_SHARDER_TABLEGEN_EXE "${MLIR_SRC_SHARDER_TABLEGEN_EXE}" CACHE INTERNAL "") set(MLIR_SRC_SHARDER_TABLEGEN_TARGET "${MLIR_SRC_SHARDER_TABLEGEN_TARGET}" CACHE INTERNAL "") -# XeGPU Dialect Option (Default OFF) -option(MLIR_DIALECT_XEGPU_ENABLE - "Enable the XeGPU dialect." - OFF) - -if(MLIR_DIALECT_XEGPU_ENABLE) - add_compile_definitions(MLIR_DIALECT_XEGPU_ENABLE) -endif() - -# TosaToTensor Conversion Option (Default OFF) -option(MLIR_CONVERSION_TOSATOTENSOR_ENABLE - "Enable TosaToTensor conversion" - OFF) - -if(MLIR_CONVERSION_TOSATOTENSOR_ENABLE) - add_compile_definitions(MLIR_CONVERSION_TOSATOTENSOR_ENABLE) -endif() - add_subdirectory(include/mlir) add_subdirectory(lib) # C API needs all dialects for registration, but should be built before tests. diff --git a/mlir/docs/Bufferization.md b/mlir/docs/Bufferization.md index 02cfee5f2b8d..e04934a120a0 100644 --- a/mlir/docs/Bufferization.md +++ b/mlir/docs/Bufferization.md @@ -202,13 +202,13 @@ e.g.: %2 = "my_dialect.yet_another_op"(%0) : (tensor) -> (tensor) ``` -## Tensor / MemRef Boundary +## Tensor / Buffer Boundary The bufferization dialect provides a few helper ops to connect tensor IR (that should be bufferized) with existing buffers (that may be allocated/provided by a different runtime/library/etc.). -`bufferization.to_memref %t` returns the future buffer of a tensor SSA value. +`bufferization.to_buffer %t` returns the future buffer of a tensor SSA value. `bufferization.to_tensor %m` returns a tensor SSA value for a given MemRef buffer. `bufferization.materialize_in_destination` indicates that a tensor value should materialize in a certain buffer. @@ -268,7 +268,7 @@ By default, One-Shot Bufferize fails when it encounters an op with tensor semantics (i.e., tensor result or tensor operand) that is not bufferizable (i.e., does not implement `BufferizableOpInterface`). This can be avoided with `allow-unknown-ops`. In that case, One-Shot Bufferize inserts -`to_memref`/`to_tensor` ops around the bufferization boundary. +`to_buffer`/`to_tensor` ops around the bufferization boundary. One-Shot Bufferize can be configured to bufferize only ops from a set of dialects with `dialect-filter`. @@ -291,7 +291,7 @@ memref. The layout map of the memref type can be controlled with One-Shot Bufferize bufferizes ops from top to bottom. This works well when all ops are bufferizable. However, when encountering a non-bufferizable tensor with -`allow-unknown-ops`, One-Shot Bufferize must insert `to_memref` ops at the +`allow-unknown-ops`, One-Shot Bufferize must insert `to_buffer` ops at the bufferization boundary and decide on a memref type. By default, One-Shot Bufferize choose the most dynamic memref type wrt. layout maps. E.g.: @@ -300,12 +300,12 @@ Bufferize choose the most dynamic memref type wrt. layout maps. 
E.g.: %1 = tensor.extract %0[%idx1, %idx2] : tensor ``` -When bufferizing the above IR, One-Shot Bufferize inserts a `to_memref` ops with +When bufferizing the above IR, One-Shot Bufferize inserts a `to_buffer` ops with dynamic offset and strides: ```mlir %0 = "my_dialect.unbufferizable_op(%t) : (tensor) -> (tensor) -%0_m = bufferization.to_memref %0 : memref> +%0_m = bufferization.to_buffer %0 : memref> %1 = memref.load %0_m[%idx1, %idx2] : memref> ``` @@ -335,7 +335,7 @@ generation of layout maps when no precise layout can be inferred: * `identity-layout-map` uses static identity layout maps. This option can be useful for legacy code that cannot handle memref types with layout maps. Note that this setting can lead to additional buffer copies when folding a - `to_tensor`/`to_memref` pair with memref types that are not cast-compatible. + `to_tensor`/`to_buffer` pair with memref types that are not cast-compatible. Note: The `unknown-type-conversion` option does not affect layout maps of function signatures. There is a separate `function-signature-type-conversion` diff --git a/mlir/include/mlir/Conversion/CMakeLists.txt b/mlir/include/mlir/Conversion/CMakeLists.txt index 0f452883c5f3..9f76ab659215 100644 --- a/mlir/include/mlir/Conversion/CMakeLists.txt +++ b/mlir/include/mlir/Conversion/CMakeLists.txt @@ -7,10 +7,4 @@ add_public_tablegen_target(MLIRConversionPassIncGen) add_mlir_doc(Passes ConversionPasses ./ -gen-pass-doc) -if(MLIR_CONVERSION_TOSATOTENSOR_ENABLE) - add_subdirectory(TosaToTensor) -endif() -if(MLIR_DIALECT_XEGPU_ENABLE) - add_subdirectory(VectorToXeGPU) -endif() add_subdirectory(ConvertToLLVM) diff --git a/mlir/include/mlir/Conversion/Passes.h b/mlir/include/mlir/Conversion/Passes.h index 5e22bf0b2946..e9761c20642c 100644 --- a/mlir/include/mlir/Conversion/Passes.h +++ b/mlir/include/mlir/Conversion/Passes.h @@ -71,9 +71,7 @@ #include "mlir/Conversion/TosaToLinalg/TosaToLinalg.h" #include "mlir/Conversion/TosaToMLProgram/TosaToMLProgram.h" #include "mlir/Conversion/TosaToSCF/TosaToSCF.h" -#ifdef MLIR_CONVERSION_TOSATOTENSOR_ENABLE #include "mlir/Conversion/TosaToTensor/TosaToTensor.h" -#endif #include "mlir/Conversion/UBToLLVM/UBToLLVM.h" #include "mlir/Conversion/UBToSPIRV/UBToSPIRV.h" #include "mlir/Conversion/VectorToArmSME/VectorToArmSME.h" @@ -81,9 +79,7 @@ #include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.h" #include "mlir/Conversion/VectorToSCF/VectorToSCF.h" #include "mlir/Conversion/VectorToSPIRV/VectorToSPIRVPass.h" -#ifdef MLIR_DIALECT_XEGPU_ENABLE #include "mlir/Conversion/VectorToXeGPU/VectorToXeGPU.h" -#endif namespace mlir { diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td index 08007790b6a0..4cd6c17e3379 100644 --- a/mlir/include/mlir/Conversion/Passes.td +++ b/mlir/include/mlir/Conversion/Passes.td @@ -1259,7 +1259,7 @@ def TosaToSCF : Pass<"tosa-to-scf"> { //===----------------------------------------------------------------------===// // TosaToTensor //===----------------------------------------------------------------------===// -#ifdef MLIR_CONVERSION_TOSATOTENSOR_ENABLE + def TosaToTensor : Pass<"tosa-to-tensor"> { let summary = "Lower TOSA to the Tensor dialect"; let dependentDialects = [ @@ -1272,7 +1272,6 @@ def TosaToTensor : Pass<"tosa-to-tensor"> { let constructor = "tosa::createTosaToTensor()"; } -#endif //===----------------------------------------------------------------------===// // UBToLLVM @@ -1465,7 +1464,6 @@ def ConvertVectorToSPIRV : Pass<"convert-vector-to-spirv"> { // 
VectorToXeGPU //===----------------------------------------------------------------------===// -#ifdef MLIR_DIALECT_XEGPU_ENABLE def ConvertVectorToXeGPU : Pass<"convert-vector-to-xegpu"> { let summary = "Lower the operations from the vector dialect into the XeGPU " "dialect"; @@ -1475,6 +1473,5 @@ def ConvertVectorToXeGPU : Pass<"convert-vector-to-xegpu"> { "vector::VectorDialect", "xegpu::XeGPUDialect" ]; } -#endif #endif // MLIR_CONVERSION_PASSES diff --git a/mlir/include/mlir/Conversion/TosaToTensor/CMakeLists.txt b/mlir/include/mlir/Conversion/TosaToTensor/CMakeLists.txt deleted file mode 100644 index 3290ac88dae6..000000000000 --- a/mlir/include/mlir/Conversion/TosaToTensor/CMakeLists.txt +++ /dev/null @@ -1,7 +0,0 @@ -if(MLIR_CONVERSION_TOSATOTENSOR_ENABLE) - set(LLVM_TARGET_DEFINITIONS Passes.td) - mlir_tablegen(Passes.h.inc -gen-pass-decls -name TosaToTensor) - mlir_tablegen(Passes.capi.h.inc -gen-pass-capi-header --prefix TosaToTensor) - mlir_tablegen(Passes.capi.cpp.inc -gen-pass-capi-impl --prefix TosaToTensor) - add_public_tablegen_target(MLIRTosaToTensorPassIncGen) -endif() diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h index d1a102e2a6e4..d1aac88a646e 100644 --- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h +++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h @@ -17,6 +17,7 @@ #include #include "mlir/Dialect/Bufferization/IR/BufferizationEnums.h.inc" +#include "mlir/Dialect/Bufferization/IR/BufferizationTypeInterfaces.h" namespace mlir { class OpBuilder; @@ -259,15 +260,15 @@ struct BufferizationOptions { std::function; /// Initializer function for analysis state. using AnalysisStateInitFn = std::function; - /// Tensor -> MemRef type converter. - /// Parameters: tensor type, memory space, func op, bufferization options + /// Tensor-like -> Buffer-like type conversion. + /// Parameters: tensor-like type, memory space, func op, bufferization options using FunctionArgTypeConverterFn = - std::function; - /// Tensor -> MemRef type converter. - /// Parameters: Value, memory space, bufferization options + /// Tensor -> MemRef type conversion. + /// Parameters: tensor type, memory space, bufferization options using UnknownTypeConverterFn = std::function; + TensorType, Attribute memorySpace, const BufferizationOptions &)>; // Produce a MemorySpace attribute from a tensor type using DefaultMemorySpaceFn = std::function(TensorType t)>; @@ -302,7 +303,7 @@ struct BufferizationOptions { Value to) const; /// Specifies whether not bufferizable ops are allowed in the input. If so, - /// bufferization.to_memref and bufferization.to_tensor ops are inserted at + /// bufferization.to_buffer and bufferization.to_tensor ops are inserted at /// the boundaries. bool allowUnknownOps = false; @@ -344,10 +345,12 @@ struct BufferizationOptions { /// predictable. void setFunctionBoundaryTypeConversion(LayoutMapOption layoutMapOption); - /// Type converter from tensors to memrefs. This type converter is used to - /// determine bufferized function argument and result types. By default, a - /// type converter that returns a memref type with a fully dynamic layout map - /// is used. + /// Type conversion from tensors to buffers. This type conversion is used to + /// determine bufferized function argument and result types. 
+ /// + /// By default, if tensor is a (builtin) tensor type, it is converted to a + /// memref type with a fully dynamic layout map; if tensor is a (generic) + /// tensor-like type, it is converted using TensorLikeType::getBufferType(). /// /// If `bufferizeFunctionBoundaries` is not set, this function isn't used. FunctionArgTypeConverterFn functionArgTypeConverterFn = nullptr; @@ -359,10 +362,9 @@ struct BufferizationOptions { /// If `bufferizeFunctionBoundaries` is not set, this flag has no effect. bool inferFunctionResultLayout = true; - /// Type converter from tensors to memrefs. This type converter is used if no - /// memref type could be inferred during bufferization. By default, a type - /// converter that returns a memref type with a fully dynamic layout map is - /// used. + /// Type conversion from tensors to memrefs. This type conversion is used if + /// no memref type could be inferred during bufferization. By default, returns + /// a memref type with a fully dynamic layout map. UnknownTypeConverterFn unknownTypeConverterFn = nullptr; // Use during type conversion to determine the memory space for memref based @@ -587,7 +589,7 @@ allocateTensorForShapedValue(OpBuilder &b, Location loc, Value shapedValue, bool copy = true); /// Lookup the buffer for the given value. If the value was not bufferized -/// yet, wrap it in a ToMemrefOp. Otherwise, it is the result of a ToTensorOp, +/// yet, wrap it in a ToBufferOp. Otherwise, it is the result of a ToTensorOp, /// from which the memref operand is returned. FailureOr getBuffer(RewriterBase &rewriter, Value value, const BufferizationOptions &options); @@ -600,7 +602,7 @@ FailureOr getBuffer(RewriterBase &rewriter, Value value, /// IR, this function can be used. /// /// This function is a wrapper around BufferizableOpInterface::getBufferType. -FailureOr getBufferType(Value value, +FailureOr getBufferType(Value value, const BufferizationOptions &options); /// Return the buffer type for a given Value (tensor) after bufferization @@ -613,7 +615,7 @@ FailureOr getBufferType(Value value, /// IR, this function can be used. /// /// This function is a wrapper around `BufferizableOpInterface::getBufferType`. -FailureOr getBufferType(Value value, +FailureOr getBufferType(Value value, const BufferizationOptions &options, SmallVector &invocationStack); @@ -638,7 +640,7 @@ OpTy replaceOpWithNewBufferizedOp(RewriterBase &rewriter, Operation *op, return newOp; } -/// Return a MemRefType to which the type of the given value can be bufferized. +/// Return a MemRefType to which the TensorType can be bufferized. /// /// If possible, op bufferization implementations should not use this function /// and instead infer precise memref types for tensor results by themselves. @@ -650,7 +652,8 @@ OpTy replaceOpWithNewBufferizedOp(RewriterBase &rewriter, Operation *op, /// Note: Canonicalization patterns could clean up layout maps and infer more /// precise layout maps after bufferization. However, many possible /// canonicalizations are currently not implemented. -BaseMemRefType getMemRefType(Value value, const BufferizationOptions &options, +BaseMemRefType getMemRefType(TensorType tensorType, + const BufferizationOptions &options, MemRefLayoutAttrInterface layout = {}, Attribute memorySpace = nullptr); @@ -693,7 +696,7 @@ AliasingOpOperandList defaultGetAliasingOpOperands(Value value, /// This is the default implementation of /// BufferizableOpInterface::getBufferType. Should not be called from other /// places. 
-FailureOr +FailureOr defaultGetBufferType(Value value, const BufferizationOptions &options, SmallVector &invocationStack); @@ -720,6 +723,19 @@ AliasingValueList unknownGetAliasingValues(OpOperand &opOperand); /// This is the default implementation of /// BufferizableOpInterface::hasTensorSemantics bool defaultHasTensorSemantics(Operation *op); + +/// This is a helper function used when buffer type is guaranteed to be memref. +/// It performs two actions: failure state checking and an explicit llvm::cast<> +/// from the buffer-like type interface to a BaseMemRefType. This allows easier +/// management of differences in C++ types at the API boundaries. Valid buffer +/// type is casted to the memref type. Otherwise, the failure state is +/// propagated i.e. asMemRefType(mlir::failure()) returns mlir::failure(). +FailureOr asMemRefType(FailureOr bufferType); + +/// This function is a free-standing helper that relies on +/// bufferization::TensorLikeTypeInterface to verify the types in tensor and +/// buffer worlds match. +bool typesMatchAfterBufferization(Operation &op, Value tensor, Value buffer); } // namespace detail } // namespace bufferization diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td index 95022d7d665d..1de1742fab81 100644 --- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td +++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td @@ -518,7 +518,7 @@ def BufferizableOpInterface : OpInterface<"BufferizableOpInterface"> { Note: This interface method should never be called directly from user code. Always use `bufferization::getBufferType`. }], - /*retType=*/"::mlir::FailureOr<::mlir::BaseMemRefType>", + /*retType=*/"::mlir::FailureOr<::mlir::bufferization::BufferLikeType>", /*methodName=*/"getBufferType", /*args=*/(ins "::mlir::Value":$value, "const ::mlir::bufferization::BufferizationOptions &":$options, diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/Bufferization.h b/mlir/include/mlir/Dialect/Bufferization/IR/Bufferization.h index 6f19dca2e822..1ef537080295 100644 --- a/mlir/include/mlir/Dialect/Bufferization/IR/Bufferization.h +++ b/mlir/include/mlir/Dialect/Bufferization/IR/Bufferization.h @@ -56,10 +56,10 @@ FailureOr castOrReallocMemRefValue(OpBuilder &b, Value value, MemRefType type, const BufferizationOptions &options); -/// Try to fold to_memref(to_tensor(x)). If x's type and the result type of the -/// to_memref op are different, a memref.cast is needed. -LogicalResult foldToMemrefToTensorPair(RewriterBase &rewriter, - ToMemrefOp toMemref, +/// Try to fold to_buffer(to_tensor(x)). If x's type and the result type of the +/// to_buffer op are different, a memref.cast is needed. 
+LogicalResult foldToBufferToTensorPair(RewriterBase &rewriter, + ToBufferOp toBuffer, + const BufferizationOptions &options); /// Add the canonicalization patterns for bufferization.dealloc to the given diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td index fad78a63444b..cfcee3503a6d 100644 --- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td +++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td @@ -12,6 +12,7 @@ include "mlir/Dialect/Bufferization/IR/AllocationOpInterface.td" include "mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.td" include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td" +include "mlir/Dialect/Bufferization/IR/BufferizationTypeInterfaces.td" include "mlir/Dialect/Bufferization/IR/BufferizationBase.td" include "mlir/Interfaces/DestinationStyleOpInterface.td" include "mlir/Interfaces/InferTypeOpInterface.td" @@ -109,7 +110,7 @@ def Bufferization_AllocTensorOp : Bufferization_Op<"alloc_tensor", AliasingValueList getAliasingValues( OpOperand &opOperand, const AnalysisState &state); - FailureOr<BaseMemRefType> getBufferType( + FailureOr<BufferLikeType> getBufferType( Value value, const BufferizationOptions &options, SmallVector<Value> &invocationStack); @@ -383,20 +384,31 @@ def Bufferization_DeallocTensorOp : Bufferization_Op<"dealloc_tensor", // ToTensorOp //===----------------------------------------------------------------------===// +class Bufferization_TensorAndBufferMatch<string tensor, string buffer> : PredOpTrait< + "specified tensor and buffer types match", + CPred< + "::mlir::bufferization::detail::typesMatchAfterBufferization(" + "$_op, $" # tensor # ", $" # buffer #")" + > +>; + def Bufferization_ToTensorOp : Bufferization_Op<"to_tensor", [ BufferizableOpInterface, SameOperandsAndResultShape, SameOperandsAndResultElementType, - AllElementTypesMatch<["memref", "result"]> + Bufferization_TensorAndBufferMatch<"result", "buffer"> ]> { - let summary = "create a tensor from a `memref`"; + let summary = "create a tensor-like type from a buffer-like type"; let description = [{ - An operation that creates a tensor from a `memref`. The result value is a - tensor whose shape and element type match the memref operand. + An operation that creates a tensor from a buffer. The result value is a + tensor-like type that must match the corresponding buffer-like operand as + per TensorLikeType::verifyCompatibleBufferType(). For builtins (TensorType + and BaseMemRefType), this means that shapes and element types match between + the tensor and the buffer. - The opposite of this op is `to_memref`. Together, these two ops are + The opposite of this op is `to_buffer`. Together, these two ops are useful for source/target materializations when doing type conversions - involving tensors and memrefs. + involving tensors and buffers. Example: @@ -438,19 +450,16 @@ def Bufferization_ToTensorOp : Bufferization_Op<"to_tensor", [ away. However, such IR is no longer bufferizable with One-Shot Bufferize. }]; - let arguments = (ins Arg]>:$memref, + [MemReadAt<0, FullEffect>]>:$buffer, UnitAttr:$restrict, UnitAttr:$writable); - let results = (outs AnyTensor:$result); + let results = (outs Bufferization_TensorLikeTypeInterface:$result); let extraClassDeclaration = [{ /// The result of a to_tensor is always a tensor. 
- TensorType getType() { - Type resultType = getResult().getType(); - if (::llvm::isa(resultType)) - return ::llvm::cast(resultType); - return {}; + ::mlir::bufferization::TensorLikeType getType() { + return getResult().getType(); } //===------------------------------------------------------------------===// @@ -459,55 +468,47 @@ def Bufferization_ToTensorOp : Bufferization_Op<"to_tensor", [ LogicalResult bufferize(RewriterBase &rewriter, const BufferizationOptions &options) const { - // to_tensor/to_memref pairs fold away after bufferization. + // to_tensor/to_buffer pairs fold away after bufferization. return success(); } bool isWritable(Value value, const AnalysisState &state); - FailureOr getBufferType( + FailureOr getBufferType( Value value, const BufferizationOptions &options, SmallVector &invocationStack) { - return ::llvm::cast(getMemref().getType()); + return getBuffer().getType(); } }]; let assemblyFormat = [{ - $memref (`restrict` $restrict^)? (`writable` $writable^)? attr-dict - `:` type($memref) `to` type($result) + $buffer (`restrict` $restrict^)? (`writable` $writable^)? attr-dict + `:` type($buffer) `to` type($result) }]; - let builders = [ - OpBuilder<(ins "Value":$memref, CArg<"bool", "false">:$restrict, CArg<"bool", "false">:$writeable), [{ - auto rtt = memref::getTensorTypeFromMemRefType(memref.getType()); - build($_builder, $_state, rtt, memref, restrict, writeable); - }]> - ]; - let hasCanonicalizer = 1; let hasFolder = 1; } //===----------------------------------------------------------------------===// -// ToMemrefOp +// ToBufferOp //===----------------------------------------------------------------------===// -def Bufferization_ToMemrefOp : Bufferization_Op<"to_memref", [ +def Bufferization_ToBufferOp : Bufferization_Op<"to_buffer", [ BufferizableOpInterface, SameOperandsAndResultShape, SameOperandsAndResultElementType, Pure, - AllShapesMatch<["memref", "tensor"]>, - AllElementTypesMatch<["memref", "tensor"]> + Bufferization_TensorAndBufferMatch<"tensor", "buffer"> ]> { - let summary = "cast a tensor to memref"; + let summary = "cast a tensor-like type to buffer-like type"; let description = [{ An operation that returns the future buffer of a `tensor`. ```mlir // Result type is memref<4x?xf32, #layout, 0> - %m = bufferization.to_memref %t : tensor<4x?xf32> to memref<4x?xf32, #layout, 0> + %m = bufferization.to_buffer %t : tensor<4x?xf32> to memref<4x?xf32, #layout, 0> ``` This operation is a specialized variant of the built-in @@ -519,15 +520,15 @@ def Bufferization_ToMemrefOp : Bufferization_Op<"to_memref", [ the returned buffer) will not be written to. }]; - let arguments = (ins AnyTensor:$tensor, UnitAttr:$read_only); - let results = (outs AnyRankedOrUnrankedMemRef:$memref); + let arguments = (ins Bufferization_TensorLikeTypeInterface:$tensor, UnitAttr:$read_only); + let results = (outs Bufferization_BufferLikeTypeInterface:$buffer); let extraClassDeclaration = [{ //===------------------------------------------------------------------===// // BufferizableOpInterface implementation //===------------------------------------------------------------------===// - // Note: ToMemrefOp / ToTensorOp are temporary ops that are inserted at the + // Note: ToBufferOp / ToTensorOp are temporary ops that are inserted at the // bufferization boundary. When One-Shot bufferization is complete, there // should be no such ops left over. 
If `allowUnknownOps` (or after running a // partial bufferization pass), such ops may be part of the resulting IR, @@ -554,7 +555,7 @@ def Bufferization_ToMemrefOp : Bufferization_Op<"to_memref", [ }]; let assemblyFormat = [{ - $tensor (`read_only` $read_only^)? attr-dict `:` type($tensor) `to` type($memref) + $tensor (`read_only` $read_only^)? attr-dict `:` type($tensor) `to` type($buffer) }]; let hasFolder = 1; diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationTypeInterfaces.h b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationTypeInterfaces.h new file mode 100644 index 000000000000..a2bfcb7ed2b7 --- /dev/null +++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationTypeInterfaces.h @@ -0,0 +1,26 @@ +//===- BufferizationTypeInterfaces.h - Type Interfaces ----------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_DIALECT_BUFFERIZATION_IR_BUFFERIZATIONTYPEINTERFACES_H_ +#define MLIR_DIALECT_BUFFERIZATION_IR_BUFFERIZATIONTYPEINTERFACES_H_ + +//===----------------------------------------------------------------------===// +// Bufferization Type Interfaces +//===----------------------------------------------------------------------===// + +#include "mlir/IR/Diagnostics.h" +#include "mlir/IR/Types.h" + +namespace mlir::bufferization { +struct BufferizationOptions; +class BufferLikeType; +} // namespace mlir::bufferization + +#include "mlir/Dialect/Bufferization/IR/BufferizationTypeInterfaces.h.inc" + +#endif // MLIR_DIALECT_BUFFERIZATION_IR_BUFFERIZATIONTYPEINTERFACES_H_ diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationTypeInterfaces.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationTypeInterfaces.td new file mode 100644 index 000000000000..fb6fc4f5ad96 --- /dev/null +++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationTypeInterfaces.td @@ -0,0 +1,62 @@ +//===- BufferizationTypeInterfaces.td - Type Interfaces ----*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This is the definition file for type interfaces used in Bufferization. +// +//===----------------------------------------------------------------------===// + +#ifndef BUFFERIZATION_TYPE_INTERFACES +#define BUFFERIZATION_TYPE_INTERFACES + +include "mlir/IR/OpBase.td" + +def Bufferization_TensorLikeTypeInterface + : TypeInterface<"TensorLikeType"> { + let cppNamespace = "::mlir::bufferization"; + let description = [{ + Indicates that this type is a tensor type (similarly to a MLIR builtin + tensor) for bufferization purposes. + }]; + + let methods = [ + InterfaceMethod<[{ + Returns a BufferLike type for this TensorLike type. + }], + /*retTy=*/"::mlir::FailureOr<::mlir::bufferization::BufferLikeType>", + /*methodName=*/"getBufferType", + /*args=*/(ins + "const ::mlir::bufferization::BufferizationOptions &":$options, + "::llvm::function_ref<::mlir::InFlightDiagnostic()>":$emitError + ) + >, + InterfaceMethod<[{ + Returns whether a BufferLike type is compatible to this TensorLike type. 
+ The BufferLike type is assumed to be created by getBufferType(). + }], + /*retTy=*/"::mlir::LogicalResult", + /*methodName=*/"verifyCompatibleBufferType", + /*args=*/(ins + "::mlir::bufferization::BufferLikeType":$bufferType, + "::llvm::function_ref<::mlir::InFlightDiagnostic()>":$emitError) + > + ]; +} + +def Bufferization_BufferLikeTypeInterface + : TypeInterface<"BufferLikeType"> { + let cppNamespace = "::mlir::bufferization"; + let description = [{ + Indicates that this type is a buffer type (similarly to a MLIR builtin + memref) for bufferization purposes. + + The interface currently has no methods as it is used by types to opt into + being supported by the bufferization procedures. + }]; +} + +#endif // BUFFERIZATION_TYPE_INTERFACES diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/Bufferization/IR/CMakeLists.txt index 13a5bc370a4f..3ead52148c20 100644 --- a/mlir/include/mlir/Dialect/Bufferization/IR/CMakeLists.txt +++ b/mlir/include/mlir/Dialect/Bufferization/IR/CMakeLists.txt @@ -10,3 +10,9 @@ mlir_tablegen(BufferizationEnums.h.inc -gen-enum-decls) mlir_tablegen(BufferizationEnums.cpp.inc -gen-enum-defs) add_public_tablegen_target(MLIRBufferizationEnumsIncGen) add_dependencies(mlir-headers MLIRBufferizationEnumsIncGen) + +set(LLVM_TARGET_DEFINITIONS BufferizationTypeInterfaces.td) +mlir_tablegen(BufferizationTypeInterfaces.h.inc -gen-type-interface-decls) +mlir_tablegen(BufferizationTypeInterfaces.cpp.inc -gen-type-interface-defs) +add_public_tablegen_target(MLIRBufferizationTypeInterfacesIncGen) +add_dependencies(mlir-headers MLIRBufferizationTypeInterfacesIncGen) diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/UnstructuredControlFlow.h b/mlir/include/mlir/Dialect/Bufferization/IR/UnstructuredControlFlow.h index 78109770efab..e9e8f45c9a4b 100644 --- a/mlir/include/mlir/Dialect/Bufferization/IR/UnstructuredControlFlow.h +++ b/mlir/include/mlir/Dialect/Bufferization/IR/UnstructuredControlFlow.h @@ -32,7 +32,7 @@ template struct OpWithUnstructuredControlFlowBufferizableOpInterfaceExternalModel : public BufferizableOpInterface::ExternalModel { - FailureOr + FailureOr getBufferType(Operation *op, Value value, const BufferizationOptions &options, SmallVector &invocationStack) const { // Note: The user may want to override this function for OpResults in @@ -65,12 +65,13 @@ struct OpWithUnstructuredControlFlowBufferizableOpInterfaceExternalModel // The operand was already bufferized. Take its type directly. 
callerType = memrefType; } else { - FailureOr maybeCallerType = + FailureOr maybeCallerType = bufferization::getBufferType(opOperand->get(), options, invocationStack); if (failed(maybeCallerType)) return failure(); - callerType = *maybeCallerType; + assert(isa(*maybeCallerType) && "expected memref type"); + callerType = cast(*maybeCallerType); } if (!bufferType) { @@ -109,7 +110,7 @@ struct OpWithUnstructuredControlFlowBufferizableOpInterfaceExternalModel if (!bufferType) return op->emitOpError("could not infer buffer type of block argument"); - return bufferType; + return cast(bufferType); } protected: diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Bufferize.h b/mlir/include/mlir/Dialect/Bufferization/Transforms/Bufferize.h index 2f495d304b4a..d5cb8d8eb673 100644 --- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Bufferize.h +++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Bufferize.h @@ -50,7 +50,7 @@ LogicalResult bufferizeOp(Operation *op, const BufferizationOptions &options, /// Bufferize the signature of `block` and its callers (i.e., ops that have the /// given block as a successor). All block argument types are changed to memref /// types. All corresponding operands of all callers are wrapped in -/// bufferization.to_memref ops. All uses of bufferized tensor block arguments +/// bufferization.to_buffer ops. All uses of bufferized tensor block arguments /// are wrapped in bufferization.to_tensor ops. /// /// It is expected that all callers implement the `BranchOpInterface`. diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td index 3bcde8edde50..a18e834a5a77 100644 --- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td +++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td @@ -120,7 +120,7 @@ def OwnershipBasedBufferDeallocation : Pass< Otherwise, the pass that bufferizes the remaining tensors is responsible to add the corresponding deallocation operations. Note that this pass does not consider any values of tensor type and assumes that MemRef values defined by - `bufferization.to_memref` do not return ownership and do not have to be + `bufferization.to_buffer` do not return ownership and do not have to be deallocated. `bufferization.to_tensor` operations are handled similarly to `bufferization.clone` operations with the exception that the result value is not handled because it's a tensor (not a MemRef). @@ -395,7 +395,7 @@ def OneShotBufferize : Pass<"one-shot-bufferize", "ModuleOp"> { One-Shot Bufferize will by default reject IR that contains non-bufferizable op, i.e., ops that do not implemement BufferizableOpInterface. Such IR can - be allowed with `allow-unknown-ops=1`. In that case, to_memref and to_tensor + be allowed with `allow-unknown-ops=1`. In that case, to_buffer and to_tensor ops will be generated at the bufferization boundary. This is useful for compatibility with existing partial bufferization passes: These can bufferize the remaining IR after running One-Shot Bufferize. @@ -415,7 +415,7 @@ def OneShotBufferize : Pass<"one-shot-bufferize", "ModuleOp"> { One-Shot Bufferize will by default assume memref types with fully dynamic layout maps when a precise layout cannot be inferred. E.g., this is the case - when wrapping a non-bufferizable op in to_memref/to_tensor ops. This + when wrapping a non-bufferizable op in to_buffer/to_tensor ops. This behavior can be overridden with `unknown-type-conversion`. 
Valid values are `fully-dynamic-layout-map` and `identity-layout-map`. @@ -537,6 +537,10 @@ def OneShotBufferize : Pass<"one-shot-bufferize", "ModuleOp"> { Statistic<"numTensorOutOfPlace", "num-tensor-out-of-place", "Number of out-of-place tensor OpOperands">, ]; + + let dependentDialects = [ + "bufferization::BufferizationDialect", "memref::MemRefDialect" + ]; } def PromoteBuffersToStack : Pass<"promote-buffers-to-stack", "func::FuncOp"> { diff --git a/mlir/include/mlir/Dialect/CMakeLists.txt b/mlir/include/mlir/Dialect/CMakeLists.txt index df4a7238e55c..f71023519733 100644 --- a/mlir/include/mlir/Dialect/CMakeLists.txt +++ b/mlir/include/mlir/Dialect/CMakeLists.txt @@ -42,6 +42,4 @@ add_subdirectory(UB) add_subdirectory(Utils) add_subdirectory(Vector) add_subdirectory(X86Vector) -if(MLIR_DIALECT_XEGPU_ENABLE) - add_subdirectory(XeGPU) -endif() +add_subdirectory(XeGPU) diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td index 2c281c9f6aa8..a61d90a0c39b 100644 --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td @@ -266,9 +266,9 @@ def SparseTensor_ToPositionsOp : SparseTensor_Op<"positions", let summary = "Extracts the `level`-th positions array of the `tensor`"; let description = [{ Returns the positions array of the tensor's storage at the given - level. This is similar to the `bufferization.to_memref` operation + level. This is similar to the `bufferization.to_buffer` operation in the sense that it provides a bridge between a tensor world view - and a bufferized world view. Unlike the `bufferization.to_memref` + and a bufferized world view. Unlike the `bufferization.to_buffer` operation, however, this sparse operation actually lowers into code that extracts the positions array from the sparse storage itself (either by calling a support library or through direct code). @@ -295,9 +295,9 @@ def SparseTensor_ToCoordinatesOp : SparseTensor_Op<"coordinates", let summary = "Extracts the `level`-th coordinates array of the `tensor`"; let description = [{ Returns the coordinates array of the tensor's storage at the given - level. This is similar to the `bufferization.to_memref` operation + level. This is similar to the `bufferization.to_buffer` operation in the sense that it provides a bridge between a tensor world view - and a bufferized world view. Unlike the `bufferization.to_memref` + and a bufferized world view. Unlike the `bufferization.to_buffer` operation, however, this sparse operation actually lowers into code that extracts the coordinates array from the sparse storage itself (either by calling a support library or through direct code). @@ -326,9 +326,9 @@ def SparseTensor_ToCoordinatesBufferOp : SparseTensor_Op<"coordinates_buffer", Returns the linear coordinates array for a sparse tensor with a trailing COO region with at least two levels. It is an error if the tensor doesn't contain such a COO region. This is similar - to the `bufferization.to_memref` operation in the sense that it + to the `bufferization.to_buffer` operation in the sense that it provides a bridge between a tensor world view and a bufferized - world view. Unlike the `bufferization.to_memref` operation, + world view. 
Unlike the `bufferization.to_buffer` operation, however, this operation actually lowers into code that extracts the linear coordinates array from the sparse storage scheme that stores the coordinates for the COO region as an array of structures. @@ -359,9 +359,9 @@ def SparseTensor_ToValuesOp : SparseTensor_Op<"values", let description = [{ Returns the values array of the sparse storage format for the given sparse tensor, independent of the actual dimension. This is similar to - the `bufferization.to_memref` operation in the sense that it provides a bridge + the `bufferization.to_buffer` operation in the sense that it provides a bridge between a tensor world view and a bufferized world view. Unlike the - `bufferization.to_memref` operation, however, this sparse operation actually + `bufferization.to_buffer` operation, however, this sparse operation actually lowers into code that extracts the values array from the sparse storage scheme (either by calling a support library or through direct code). diff --git a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td index 5559995aa261..8aa2c5557015 100644 --- a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td +++ b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td @@ -143,20 +143,24 @@ def ShapedTypeInterface : TypeInterface<"ShapedType"> { /// Return the number of elements present in the given shape. static int64_t getNumElements(ArrayRef shape); - }]; - let extraSharedClassDeclaration = [{ /// Return a clone of this type with the given new shape and element type. + /// The returned type is ranked, even if this type is unranked. auto clone(::llvm::ArrayRef shape, Type elementType) { - return $_type.cloneWith(shape, elementType); + return cloneWith(shape, elementType); } - /// Return a clone of this type with the given new shape. + /// Return a clone of this type with the given new shape. The returned type + /// is ranked, even if this type is unranked. auto clone(::llvm::ArrayRef shape) { - return $_type.cloneWith(shape, $_type.getElementType()); + return cloneWith(shape, getElementType()); } + }]; - /// Return a clone of this type with the given new element type. + let extraSharedClassDeclaration = [{ + /// Return a clone of this type with the given new element type. The + /// returned type is ranked if and only if this type is ranked. In that + /// case, the returned type has the same shape as this type. auto clone(::mlir::Type elementType) { return $_type.cloneWith(/*shape=*/std::nullopt, elementType); } @@ -223,68 +227,4 @@ def ShapedTypeInterface : TypeInterface<"ShapedType"> { }]; } -//===----------------------------------------------------------------------===// -// TensorTypeInterface -//===----------------------------------------------------------------------===// - -def TensorTypeInterface : TypeInterface<"TensorType", [ - ShapedTypeInterface - ] - > { - let cppNamespace = "::mlir"; - let description = [{ - This interface provides a shared interface for ranked and unranked type - and customized tensor types. - - This class attaches the ShapedTypeInterface to act as a mixin to - provide many useful utility functions. - }]; - - let extraClassDeclaration = [{ - // Return true if the specified element type is ok in a tensor. 
- static bool isValidElementType(::mlir::Type type); - }]; - - let extraClassOf = [{ - return $_type.hasTrait<::mlir::TensorType::Trait>(); - }]; - -} - -//===----------------------------------------------------------------------===// -// BaseMemRefTypeInterface -//===----------------------------------------------------------------------===// - -def BaseMemRefTypeInterface : TypeInterface<"BaseMemRefType", [ - ShapedTypeInterface - ] - > { - let cppNamespace = "::mlir"; - let description = [{ - This interface provides a shared interface for ranked and unranked memref and - customized memref types. - - This interface attaches the ShapedTypeInterface to act as a mixin to - provide many useful utility functions. - }]; - - let methods = [ - InterfaceMethod<[{ - Returns the memory space in which data referred to by this memref resides. - }], - "::mlir::Attribute", "getMemorySpace">, - ]; - - let extraClassDeclaration = [{ - // Return true if the specified element type is ok in a memref. - static bool isValidElementType(::mlir::Type type); - - unsigned getMemorySpaceAsInt() const; - }]; - - let extraClassOf = [{ - return $_type.hasTrait<::mlir::BaseMemRefType::Trait>(); - }]; -} - #endif // MLIR_IR_BUILTINTYPEINTERFACES_TD_ diff --git a/mlir/include/mlir/IR/BuiltinTypes.h b/mlir/include/mlir/IR/BuiltinTypes.h index 9d7db8970e50..df1e02732617 100644 --- a/mlir/include/mlir/IR/BuiltinTypes.h +++ b/mlir/include/mlir/IR/BuiltinTypes.h @@ -43,6 +43,108 @@ template class ValueSemantics : public TypeTrait::TraitBase {}; +//===----------------------------------------------------------------------===// +// TensorType +//===----------------------------------------------------------------------===// + +/// Tensor types represent multi-dimensional arrays, and have two variants: +/// RankedTensorType and UnrankedTensorType. +/// Note: This class attaches the ShapedType trait to act as a mixin to +/// provide many useful utility functions. This inheritance has no effect +/// on derived tensor types. +class TensorType : public Type, public ShapedType::Trait { +public: + using Type::Type; + + /// Returns the element type of this tensor type. + Type getElementType() const; + + /// Returns if this type is ranked, i.e. it has a known number of dimensions. + bool hasRank() const; + + /// Returns the shape of this tensor type. + ArrayRef getShape() const; + + /// Clone this type with the given shape and element type. If the + /// provided shape is `std::nullopt`, the current shape of the type is used. + TensorType cloneWith(std::optional> shape, + Type elementType) const; + + // Make sure that base class overloads are visible. + using ShapedType::Trait::clone; + + /// Return a clone of this type with the given new shape and element type. + /// The returned type is ranked, even if this type is unranked. + RankedTensorType clone(ArrayRef shape, Type elementType) const; + + /// Return a clone of this type with the given new shape. The returned type + /// is ranked, even if this type is unranked. + RankedTensorType clone(ArrayRef shape) const; + + /// Return true if the specified element type is ok in a tensor. + static bool isValidElementType(Type type); + + /// Methods for support type inquiry through isa, cast, and dyn_cast. + static bool classof(Type type); + + /// Allow implicit conversion to ShapedType. 
+ operator ShapedType() const { return llvm::cast(*this); } +}; + +//===----------------------------------------------------------------------===// +// BaseMemRefType +//===----------------------------------------------------------------------===// + +/// This class provides a shared interface for ranked and unranked memref types. +/// Note: This class attaches the ShapedType trait to act as a mixin to +/// provide many useful utility functions. This inheritance has no effect +/// on derived memref types. +class BaseMemRefType : public Type, public ShapedType::Trait { +public: + using Type::Type; + + /// Returns the element type of this memref type. + Type getElementType() const; + + /// Returns if this type is ranked, i.e. it has a known number of dimensions. + bool hasRank() const; + + /// Returns the shape of this memref type. + ArrayRef getShape() const; + + /// Clone this type with the given shape and element type. If the + /// provided shape is `std::nullopt`, the current shape of the type is used. + BaseMemRefType cloneWith(std::optional> shape, + Type elementType) const; + + // Make sure that base class overloads are visible. + using ShapedType::Trait::clone; + + /// Return a clone of this type with the given new shape and element type. + /// The returned type is ranked, even if this type is unranked. + MemRefType clone(ArrayRef shape, Type elementType) const; + + /// Return a clone of this type with the given new shape. The returned type + /// is ranked, even if this type is unranked. + MemRefType clone(ArrayRef shape) const; + + /// Return true if the specified element type is ok in a memref. + static bool isValidElementType(Type type); + + /// Methods for support type inquiry through isa, cast, and dyn_cast. + static bool classof(Type type); + + /// Returns the memory space in which data referred to by this memref resides. + Attribute getMemorySpace() const; + + /// [deprecated] Returns the memory space in old raw integer representation. + /// New `Attribute getMemorySpace()` method should be used instead. + unsigned getMemorySpaceAsInt() const; + + /// Allow implicit conversion to ShapedType. + operator ShapedType() const { return llvm::cast(*this); } +}; + } // namespace mlir //===----------------------------------------------------------------------===// @@ -53,6 +155,7 @@ class ValueSemantics #include "mlir/IR/BuiltinTypes.h.inc" namespace mlir { +#include "mlir/IR/BuiltinTypeConstraints.h.inc" //===----------------------------------------------------------------------===// // MemRefType @@ -250,7 +353,7 @@ enum class SliceVerificationResult { /// code. 
SliceVerificationResult isRankReducedType(ShapedType originalType, ShapedType candidateReducedType); - + //===----------------------------------------------------------------------===// // Convenience wrappers for VectorType // @@ -287,6 +390,10 @@ class FixedVectorType : public VectorType { // Deferred Method Definitions //===----------------------------------------------------------------------===// +inline bool BaseMemRefType::classof(Type type) { + return llvm::isa(type); +} + inline bool BaseMemRefType::isValidElementType(Type type) { return type.isIntOrIndexOrFloat() || llvm::isa( @@ -294,37 +401,14 @@ inline bool BaseMemRefType::isValidElementType(Type type) { llvm::isa(type); } +inline bool TensorType::classof(Type type) { + return llvm::isa(type); +} + //===----------------------------------------------------------------------===// // Type Utilities //===----------------------------------------------------------------------===// -/// Returns the strides of the MemRef if the layout map is in strided form. -/// MemRefs with a layout map in strided form include: -/// 1. empty or identity layout map, in which case the stride information is -/// the canonical form computed from sizes; -/// 2. a StridedLayoutAttr layout; -/// 3. any other layout that be converted into a single affine map layout of -/// the form `K + k0 * d0 + ... kn * dn`, where K and ki's are constants or -/// symbols. -/// -/// A stride specification is a list of integer values that are either static -/// or dynamic (encoded with ShapedType::kDynamic). Strides encode -/// the distance in the number of elements between successive entries along a -/// particular dimension. -LogicalResult getStridesAndOffset(MemRefType t, - SmallVectorImpl &strides, - int64_t &offset); - -/// Wrapper around getStridesAndOffset(MemRefType, SmallVectorImpl, -/// int64_t) that will assert if the logical result is not succeeded. -std::pair, int64_t> getStridesAndOffset(MemRefType t); - -/// Return a version of `t` with identity layout if it can be determined -/// statically that the layout is the canonical contiguous strided layout. -/// Otherwise pass `t`'s layout into `simplifyAffineMap` and return a copy of -/// `t` with simplified layout. -MemRefType canonicalizeStridedLayout(MemRefType t); - /// Given MemRef `sizes` that are either static or dynamic, returns the /// canonical "contiguous" strides AffineExpr. Strides are multiplicative and /// once a dynamic dimension is encountered, all canonical strides become @@ -347,24 +431,6 @@ AffineExpr makeCanonicalStridedLayoutExpr(ArrayRef sizes, /// where `exprs` is {d0, d1, .., d_(sizes.size()-1)} AffineExpr makeCanonicalStridedLayoutExpr(ArrayRef sizes, MLIRContext *context); - -/// Return "true" if the layout for `t` is compatible with strided semantics. -bool isStrided(MemRefType t); - -/// Return "true" if the last dimension of the given type has a static unit -/// stride. Also return "true" for types with no strides. -bool isLastMemrefDimUnitStride(MemRefType type); - -/// Return "true" if the last N dimensions of the given type are contiguous. -/// -/// Examples: -/// - memref<5x4x3x2xi8, strided<[24, 6, 2, 1]> is contiguous when -/// considering both _all_ and _only_ the trailing 3 dims, -/// - memref<5x4x3x2xi8, strided<[48, 6, 2, 1]> is _only_ contiguous when -/// considering the trailing 3 dims. 
-/// -bool trailingNDimsContiguous(MemRefType type, int64_t n); - } // namespace mlir #endif // MLIR_IR_BUILTINTYPES_H diff --git a/mlir/include/mlir/IR/BuiltinTypes.td b/mlir/include/mlir/IR/BuiltinTypes.td index 300ca8878ae7..e5a2ae81da0c 100644 --- a/mlir/include/mlir/IR/BuiltinTypes.td +++ b/mlir/include/mlir/IR/BuiltinTypes.td @@ -542,8 +542,8 @@ def Builtin_Integer : Builtin_Type<"Integer", "integer"> { //===----------------------------------------------------------------------===// def Builtin_MemRef : Builtin_Type<"MemRef", "memref", [ - BaseMemRefTypeInterface - ]> { + ShapedTypeInterface + ], "BaseMemRefType"> { let summary = "Shaped reference to a region of memory"; let description = [{ Syntax: @@ -794,7 +794,7 @@ def Builtin_MemRef : Builtin_Type<"MemRef", "memref", [ "unsigned":$memorySpaceInd)> ]; let extraClassDeclaration = [{ - using ShapedType::Trait::clone; + using BaseMemRefType::clone; using ShapedType::Trait::getElementTypeBitWidth; using ShapedType::Trait::getRank; using ShapedType::Trait::getNumElements; @@ -828,14 +828,6 @@ def Builtin_MemRef : Builtin_Type<"MemRef", "memref", [ /// New `Attribute getMemorySpace()` method should be used instead. unsigned getMemorySpaceAsInt() const; - /// Returns if this type is ranked (always true). - bool hasRank() const { return true; } - - /// Returns a clone of this type with the given shape and element - /// type. If a shape is not provided, the current shape of the type is used. - MemRefType cloneWith(std::optional> shape, - Type elementType) const; - /// Returns the strides of the MemRef if the layout map is in strided form. /// MemRefs with a layout map in strided form include: /// 1. empty or identity layout map, in which case the stride information @@ -942,8 +934,8 @@ def Builtin_Opaque : Builtin_Type<"Opaque", "opaque"> { //===----------------------------------------------------------------------===// def Builtin_RankedTensor : Builtin_Type<"RankedTensor", "tensor", [ - TensorTypeInterface, ValueSemantics - ]> { + ShapedTypeInterface, ValueSemantics + ], "TensorType"> { let summary = "Multi-dimensional array with a fixed number of dimensions"; let description = [{ Syntax: @@ -1024,7 +1016,7 @@ def Builtin_RankedTensor : Builtin_Type<"RankedTensor", "tensor", [ }]> ]; let extraClassDeclaration = [{ - using ShapedType::Trait::clone; + using TensorType::clone; using ShapedType::Trait::getElementTypeBitWidth; using ShapedType::Trait::getRank; using ShapedType::Trait::getNumElements; @@ -1038,12 +1030,11 @@ def Builtin_RankedTensor : Builtin_Type<"RankedTensor", "tensor", [ /// Arguments that are passed into the builder must outlive the builder. class Builder; - /// Returns if this type is ranked (always true). - bool hasRank() const { return true; } - - /// Returns a clone of this type with the given shape and element type. - RankedTensorType cloneWith(std::optional> shape, - Type elementType) const; + /// Return a clone of this type with the given new element type and the same + /// shape as this type. 
+ RankedTensorType clone(::mlir::Type elementType) { + return ::llvm::cast(cloneWith(getShape(), elementType)); + } }]; let skipDefaultBuilders = 1; let genVerifyDecl = 1; @@ -1121,8 +1112,8 @@ def Builtin_Tuple : Builtin_Type<"Tuple", "tuple"> { //===----------------------------------------------------------------------===// def Builtin_UnrankedMemRef : Builtin_Type<"UnrankedMemRef", "unranked_memref", [ - BaseMemRefTypeInterface - ]> { + ShapedTypeInterface + ], "BaseMemRefType"> { let summary = "Shaped reference, with unknown rank, to a region of memory"; let description = [{ Syntax: @@ -1168,7 +1159,7 @@ def Builtin_UnrankedMemRef : Builtin_Type<"UnrankedMemRef", "unranked_memref", [ }]> ]; let extraClassDeclaration = [{ - using ShapedType::Trait::clone; + using BaseMemRefType::clone; using ShapedType::Trait::getElementTypeBitWidth; using ShapedType::Trait::getRank; using ShapedType::Trait::getNumElements; @@ -1184,12 +1175,11 @@ def Builtin_UnrankedMemRef : Builtin_Type<"UnrankedMemRef", "unranked_memref", [ /// New `Attribute getMemorySpace()` method should be used instead. unsigned getMemorySpaceAsInt() const; - /// Returns if this type is ranked (always false). - bool hasRank() const { return false; } - - /// Returns a clone of this type with the given shape and element type. - BaseMemRefType cloneWith(std::optional> shape, - Type elementType) const; + /// Return a clone of this type with the given new element type and the same + /// shape as this type. + MemRefType clone(::mlir::Type elementType) { + return ::llvm::cast(cloneWith(getShape(), elementType)); + } }]; let skipDefaultBuilders = 1; let genVerifyDecl = 1; @@ -1200,8 +1190,8 @@ def Builtin_UnrankedMemRef : Builtin_Type<"UnrankedMemRef", "unranked_memref", [ //===----------------------------------------------------------------------===// def Builtin_UnrankedTensor : Builtin_Type<"UnrankedTensor", "unranked_tensor", [ - TensorTypeInterface, ValueSemantics - ]> { + ShapedTypeInterface, ValueSemantics + ], "TensorType"> { let summary = "Multi-dimensional array with unknown dimensions"; let description = [{ Syntax: @@ -1228,7 +1218,7 @@ def Builtin_UnrankedTensor : Builtin_Type<"UnrankedTensor", "unranked_tensor", [ }]> ]; let extraClassDeclaration = [{ - using ShapedType::Trait::clone; + using TensorType::clone; using ShapedType::Trait::getElementTypeBitWidth; using ShapedType::Trait::getRank; using ShapedType::Trait::getNumElements; @@ -1239,13 +1229,6 @@ def Builtin_UnrankedTensor : Builtin_Type<"UnrankedTensor", "unranked_tensor", [ using ShapedType::Trait::getDynamicDimIndex; ArrayRef getShape() const { return std::nullopt; } - - /// Returns if this type is ranked (always false). - bool hasRank() const { return false; } - - /// Returns a clone of this type with the given shape and element type. 
- TensorType cloneWith(std::optional> shape, - Type elementType) const; }]; let skipDefaultBuilders = 1; let genVerifyDecl = 1; @@ -1255,6 +1238,10 @@ def Builtin_UnrankedTensor : Builtin_Type<"UnrankedTensor", "unranked_tensor", [ // VectorType //===----------------------------------------------------------------------===// +def Builtin_VectorTypeElementType : AnyTypeOf<[AnyInteger, Index, AnyFloat]> { + let cppFunctionName = "isValidVectorTypeElementType"; +} + def Builtin_Vector : Builtin_Type<"Vector", "vector", [ShapedTypeInterface, ValueSemantics], "Type"> { let summary = "Multi-dimensional SIMD vector type"; @@ -1305,7 +1292,7 @@ def Builtin_Vector : Builtin_Type<"Vector", "vector", }]; let parameters = (ins ArrayRefParameter<"int64_t">:$shape, - "Type":$elementType, + Builtin_VectorTypeElementType:$elementType, ArrayRefParameter<"bool">:$scalableDims ); let builders = [ @@ -1329,11 +1316,8 @@ def Builtin_Vector : Builtin_Type<"Vector", "vector", class Builder; /// Returns true if the given type can be used as an element of a vector - /// type. In particular, vectors can consist of integer, index, or float - /// primitives. - static bool isValidElementType(Type t) { - return ::llvm::isa(t); - } + /// type. See "Builtin_VectorTypeElementType" for allowed types. + static bool isValidElementType(Type t); /// Returns true if the vector contains scalable dimensions. bool isScalable() const { diff --git a/mlir/include/mlir/InitAllDialects.h b/mlir/include/mlir/InitAllDialects.h index c5f661c41068..0da82825c828 100644 --- a/mlir/include/mlir/InitAllDialects.h +++ b/mlir/include/mlir/InitAllDialects.h @@ -94,9 +94,7 @@ #include "mlir/Dialect/Vector/Transforms/BufferizableOpInterfaceImpl.h" #include "mlir/Dialect/Vector/Transforms/SubsetOpInterfaceImpl.h" #include "mlir/Dialect/X86Vector/X86VectorDialect.h" -#ifdef MLIR_DIALECT_XEGPU_ENABLE #include "mlir/Dialect/XeGPU/IR/XeGPU.h" -#endif #include "mlir/IR/Dialect.h" #include "mlir/Interfaces/CastInterfaces.h" #include "mlir/Target/LLVM/NVVM/Target.h" @@ -151,13 +149,10 @@ inline void registerAllDialects(DialectRegistry ®istry) { transform::TransformDialect, ub::UBDialect, vector::VectorDialect, - x86vector::X86VectorDialect>(); + x86vector::X86VectorDialect, + xegpu::XeGPUDialect>(); // clang-format on -#ifdef MLIR_DIALECT_XEGPU_ENABLE - register.insert(); -#endif - // Register all external models. 
affine::registerValueBoundsOpInterfaceExternalModels(registry); arith::registerBufferDeallocationOpInterfaceExternalModels(registry); diff --git a/mlir/include/mlir/InitAllPasses.h b/mlir/include/mlir/InitAllPasses.h index 385a24575a1f..dd8b292a8734 100644 --- a/mlir/include/mlir/InitAllPasses.h +++ b/mlir/include/mlir/InitAllPasses.h @@ -45,9 +45,7 @@ #include "mlir/Dialect/Tosa/Transforms/Passes.h" #include "mlir/Dialect/Transform/Transforms/Passes.h" #include "mlir/Dialect/Vector/Transforms/Passes.h" -#ifdef MLIR_DIALECT_XEGPU_ENABLE #include "mlir/Dialect/XeGPU/Transforms/Passes.h" -#endif #include "mlir/Transforms/Passes.h" #include @@ -96,10 +94,7 @@ inline void registerAllPasses() { arm_sme::registerArmSMEPasses(); arm_sve::registerArmSVEPasses(); emitc::registerEmitCPasses(); - -#ifdef MLIR_DIALECT_XEGPU_ENABLE xegpu::registerXeGPUPasses(); -#endif // Dialect pipelines bufferization::registerBufferizationPipelines(); diff --git a/mlir/lib/Conversion/CMakeLists.txt b/mlir/lib/Conversion/CMakeLists.txt index 81d6eb320a32..a570978f0375 100644 --- a/mlir/lib/Conversion/CMakeLists.txt +++ b/mlir/lib/Conversion/CMakeLists.txt @@ -62,9 +62,7 @@ add_subdirectory(TosaToArith) add_subdirectory(TosaToLinalg) add_subdirectory(TosaToMLProgram) add_subdirectory(TosaToSCF) -if(MLIR_CONVERSION_TOSATOTENSOR_ENABLE) - add_subdirectory(TosaToTensor) -endif() +add_subdirectory(TosaToTensor) add_subdirectory(UBToLLVM) add_subdirectory(UBToSPIRV) add_subdirectory(VectorToArmSME) @@ -72,6 +70,4 @@ add_subdirectory(VectorToGPU) add_subdirectory(VectorToLLVM) add_subdirectory(VectorToSCF) add_subdirectory(VectorToSPIRV) -if(MLIR_DIALECT_XEGPU_ENABLE) - add_subdirectory(VectorToXeGPU) -endif() +add_subdirectory(VectorToXeGPU) diff --git a/mlir/lib/Conversion/MeshToMPI/MeshToMPI.cpp b/mlir/lib/Conversion/MeshToMPI/MeshToMPI.cpp index eb265c621564..99b1e936a891 100644 --- a/mlir/lib/Conversion/MeshToMPI/MeshToMPI.cpp +++ b/mlir/lib/Conversion/MeshToMPI/MeshToMPI.cpp @@ -255,7 +255,7 @@ struct ConvertUpdateHaloOp // If the destination is a memref, we need to cast it to a tensor auto tensorType = MemRefType::get( dstShape, cast(array.getType()).getElementType()); - array = rewriter.create(loc, tensorType, array) + array = rewriter.create(loc, tensorType, array) .getResult(); } auto rank = cast(array.getType()).getRank(); diff --git a/mlir/lib/Conversion/TosaToTensor/CMakeLists.txt b/mlir/lib/Conversion/TosaToTensor/CMakeLists.txt index 992b2f816478..2870baa20757 100644 --- a/mlir/lib/Conversion/TosaToTensor/CMakeLists.txt +++ b/mlir/lib/Conversion/TosaToTensor/CMakeLists.txt @@ -1,22 +1,20 @@ -if(MLIR_CONVERSION_TOSATOTENSOR_ENABLE) - add_mlir_conversion_library(MLIRTosaToTensor - TosaToTensor.cpp - TosaToTensorPass.cpp +add_mlir_conversion_library(MLIRTosaToTensor + TosaToTensor.cpp + TosaToTensorPass.cpp - ADDITIONAL_HEADER_DIRS - ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/Tosa - ${MLIR_MAIN_INCLUDE_DIR}/mlir/IR + ADDITIONAL_HEADER_DIRS + ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/Tosa + ${MLIR_MAIN_INCLUDE_DIR}/mlir/IR - DEPENDS - MLIRConversionPassIncGen + DEPENDS + MLIRConversionPassIncGen - LINK_LIBS PUBLIC - MLIRTensorDialect - MLIRTensorUtils - MLIRIR - MLIRPass - MLIRTosaDialect - MLIRTosaTransforms - MLIRSupport - ) -endif() + LINK_LIBS PUBLIC + MLIRTensorDialect + MLIRTensorUtils + MLIRIR + MLIRPass + MLIRTosaDialect + MLIRTosaTransforms + MLIRSupport + ) diff --git a/mlir/lib/Conversion/VectorToXeGPU/CMakeLists.txt b/mlir/lib/Conversion/VectorToXeGPU/CMakeLists.txt index bcb1397385fc..567083da0023 100644 --- 
a/mlir/lib/Conversion/VectorToXeGPU/CMakeLists.txt +++ b/mlir/lib/Conversion/VectorToXeGPU/CMakeLists.txt @@ -1,18 +1,16 @@ -if(MLIR_DIALECT_XEGPU_ENABLE) - add_mlir_conversion_library(MLIRVectorToXeGPU - VectorToXeGPU.cpp +add_mlir_conversion_library(MLIRVectorToXeGPU + VectorToXeGPU.cpp - ADDITIONAL_HEADER_DIRS - ${MLIR_MAIN_INCLUDE_DIR}/mlir/Conversion/VectorToXeGPU + ADDITIONAL_HEADER_DIRS + ${MLIR_MAIN_INCLUDE_DIR}/mlir/Conversion/VectorToXeGPU - DEPENDS - MLIRConversionPassIncGen + DEPENDS + MLIRConversionPassIncGen - LINK_LIBS PUBLIC - MLIRArithDialect - MLIRMemRefDialect - MLIRTransforms - MLIRVectorDialect - MLIRXeGPUDialect - ) -endif() + LINK_LIBS PUBLIC + MLIRArithDialect + MLIRMemRefDialect + MLIRTransforms + MLIRVectorDialect + MLIRXeGPUDialect + ) diff --git a/mlir/lib/Dialect/Arith/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Arith/Transforms/BufferizableOpInterfaceImpl.cpp index 5e69a98db8f1..42f9e087cce9 100644 --- a/mlir/lib/Dialect/Arith/Transforms/BufferizableOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Arith/Transforms/BufferizableOpInterfaceImpl.cpp @@ -159,8 +159,8 @@ struct SelectOpInterface // buffers have different types, they differ only in their layout map. Cast // both of them to the most dynamic MemRef type. if (trueBuffer.getType() != falseBuffer.getType()) { - auto targetType = - bufferization::getBufferType(selectOp.getResult(), options); + auto targetType = bufferization::detail::asMemRefType( + bufferization::getBufferType(selectOp.getResult(), options)); if (failed(targetType)) return failure(); if (trueBuffer.getType() != *targetType) @@ -176,29 +176,31 @@ struct SelectOpInterface return success(); } - FailureOr + FailureOr getBufferType(Operation *op, Value value, const BufferizationOptions &options, SmallVector &invocationStack) const { auto selectOp = cast(op); assert(value == selectOp.getResult() && "invalid value"); - auto trueType = bufferization::getBufferType(selectOp.getTrueValue(), - options, invocationStack); - auto falseType = bufferization::getBufferType(selectOp.getFalseValue(), - options, invocationStack); + auto trueType = + bufferization::detail::asMemRefType(bufferization::getBufferType( + selectOp.getTrueValue(), options, invocationStack)); + auto falseType = + bufferization::detail::asMemRefType(bufferization::getBufferType( + selectOp.getFalseValue(), options, invocationStack)); if (failed(trueType) || failed(falseType)) return failure(); if (*trueType == *falseType) - return *trueType; + return cast(*trueType); if (trueType->getMemorySpace() != falseType->getMemorySpace()) return op->emitError("inconsistent memory space on true/false operands"); // If the buffers have different types, they differ only in their layout // map. 
auto memrefType = llvm::cast(*trueType); - return getMemRefTypeWithFullyDynamicLayout( + return cast(getMemRefTypeWithFullyDynamicLayout( RankedTensorType::get(memrefType.getShape(), memrefType.getElementType()), - memrefType.getMemorySpace()); + memrefType.getMemorySpace())); } }; diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp index 7967f2973e6a..2d57a341c3eb 100644 --- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp @@ -150,7 +150,9 @@ FailureOr bufferization::allocateTensorForShapedValue( if (llvm::isa(shapedValue.getType())) { tensor = shapedValue; } else if (llvm::isa(shapedValue.getType())) { - tensor = b.create(loc, shapedValue); + tensor = b.create( + loc, memref::getTensorTypeFromMemRefType(shapedValue.getType()), + shapedValue); } else if (llvm::isa(shapedValue.getType()) || llvm::isa(shapedValue.getType())) { return getOwnerOfValue(shapedValue) @@ -190,7 +192,7 @@ FailureOr bufferization::allocateTensorForShapedValue( // Add 'memory_space' attribute. Not needed if 'copy' operand is specified. if (copy) return allocTensorOp.getResult(); - FailureOr copyBufferType = getBufferType(tensor, options); + auto copyBufferType = detail::asMemRefType(getBufferType(tensor, options)); if (failed(copyBufferType)) return failure(); std::optional memorySpace = copyBufferType->getMemorySpace(); @@ -312,18 +314,27 @@ bool OpFilter::isOpAllowed(Operation *op) const { namespace { /// Default function arg type converter: Use a fully dynamic layout map. -BaseMemRefType -defaultFunctionArgTypeConverter(TensorType type, Attribute memorySpace, +BufferLikeType +defaultFunctionArgTypeConverter(TensorLikeType type, Attribute memorySpace, func::FuncOp funcOp, const BufferizationOptions &options) { - return getMemRefTypeWithFullyDynamicLayout(type, memorySpace); + if (auto tensorType = mlir::dyn_cast(type)) { + return cast( + getMemRefTypeWithFullyDynamicLayout(tensorType, memorySpace)); + } + + // If not builtin, fallback to TensorLikeType::getBufferType() + auto bufferType = + type.getBufferType(options, [&]() { return funcOp->emitError(); }); + assert(succeeded(bufferType) && + "a valid buffer is always expected at function boundary"); + return *bufferType; } /// Default unknown type converter: Use a fully dynamic layout map. 
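The `bufferization::detail::asMemRefType` helper used in the hunks above bridges the new interface-typed results back into memref-based code. A minimal usage sketch, assuming the tensor bufferizes to a builtin memref (the wrapper function itself is hypothetical; only the two bufferization calls come from this patch):

```c++
// Unwrap the BufferLikeType result back into a BaseMemRefType.
static FailureOr<BaseMemRefType>
getMemRefBufferType(Value tensorValue, const BufferizationOptions &options) {
  // asMemRefType() forwards failure and otherwise casts the BufferLikeType
  // result to BaseMemRefType.
  FailureOr<BaseMemRefType> memrefType = bufferization::detail::asMemRefType(
      bufferization::getBufferType(tensorValue, options));
  if (failed(memrefType))
    return failure();
  return *memrefType;
}
```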
BaseMemRefType -defaultUnknownTypeConverter(Value value, Attribute memorySpace, +defaultUnknownTypeConverter(TensorType tensorType, Attribute memorySpace, const BufferizationOptions &options) { - return getMemRefTypeWithFullyDynamicLayout( - llvm::cast(value.getType()), memorySpace); + return getMemRefTypeWithFullyDynamicLayout(tensorType, memorySpace); } } // namespace @@ -360,14 +371,25 @@ BufferizationOptions::dynCastBufferizableOp(Value value) const { void BufferizationOptions::setFunctionBoundaryTypeConversion( LayoutMapOption layoutMapOption) { - functionArgTypeConverterFn = [=](TensorType tensorType, Attribute memorySpace, + functionArgTypeConverterFn = [=](TensorLikeType type, Attribute memorySpace, func::FuncOp funcOp, const BufferizationOptions &options) { - if (layoutMapOption == LayoutMapOption::IdentityLayoutMap) - return bufferization::getMemRefTypeWithStaticIdentityLayout(tensorType, - memorySpace); - return bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType, - memorySpace); + if (auto tensorType = mlir::dyn_cast(type)) { + if (layoutMapOption == LayoutMapOption::IdentityLayoutMap) + return cast( + bufferization::getMemRefTypeWithStaticIdentityLayout(tensorType, + memorySpace)); + return cast( + bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType, + memorySpace)); + } + + // If not builtin, fallback to TensorLikeType::getBufferType() + auto bufferType = + type.getBufferType(options, [&]() { return funcOp->emitError(); }); + assert(succeeded(bufferType) && + "a valid buffer is always expected at function boundary"); + return *bufferType; }; inferFunctionResultLayout = layoutMapOption == LayoutMapOption::InferLayoutMap; @@ -608,8 +630,8 @@ bool AnalysisState::canOmitTensorCopy(OpOperand &opOperand) const { } bool AnalysisState::isInPlace(OpOperand &opOperand) const { - // ToMemrefOps are always in-place. - if (isa(opOperand.getOwner())) + // ToBufferOps are always in-place. + if (isa(opOperand.getOwner())) return true; // In the absence of analysis information, OpOperands that bufferize to a @@ -634,51 +656,51 @@ bool AnalysisState::hasUndefinedContents(OpOperand *opOperand) const { return false; } -// bufferization.to_memref is not allowed to change the rank. -static void ensureToMemrefOpIsValid(Value tensor, Type memrefType) { +// bufferization.to_buffer is not allowed to change the rank. +static void ensureToBufferOpIsValid(Value tensor, Type memrefType) { #ifndef NDEBUG auto rankedTensorType = llvm::dyn_cast(tensor.getType()); assert((!rankedTensorType || llvm::cast(memrefType).getRank() == rankedTensorType.getRank()) && - "to_memref would be invalid: mismatching ranks"); + "to_buffer would be invalid: mismatching ranks"); #endif } FailureOr bufferization::getBuffer(RewriterBase &rewriter, Value value, const BufferizationOptions &options) { #ifndef NDEBUG - auto tensorType = llvm::dyn_cast(value.getType()); + auto tensorType = llvm::dyn_cast(value.getType()); assert(tensorType && "unexpected non-tensor type"); #endif // NDEBUG // Replace "%t = to_tensor %m" with %m. if (auto toTensorOp = value.getDefiningOp()) - return toTensorOp.getMemref(); + return toTensorOp.getBuffer(); - // Insert to_memref op. + // Insert to_buffer op. 
OpBuilder::InsertionGuard g(rewriter); setInsertionPointAfter(rewriter, value); - FailureOr memrefType = getBufferType(value, options); - if (failed(memrefType)) + FailureOr bufferType = getBufferType(value, options); + if (failed(bufferType)) return failure(); - ensureToMemrefOpIsValid(value, *memrefType); + ensureToBufferOpIsValid(value, *bufferType); return rewriter - .create(value.getLoc(), *memrefType, value) + .create(value.getLoc(), *bufferType, value) .getResult(); } /// Return the buffer type for a given Value (tensor) after bufferization. -FailureOr +FailureOr bufferization::getBufferType(Value value, const BufferizationOptions &options) { SmallVector invocationStack; return getBufferType(value, options, invocationStack); } /// Return the buffer type for a given Value (tensor) after bufferization. -FailureOr +FailureOr bufferization::getBufferType(Value value, const BufferizationOptions &options, SmallVector &invocationStack) { - assert(llvm::isa(value.getType()) && + assert(llvm::isa(value.getType()) && "unexpected non-tensor type"); invocationStack.push_back(value); auto popFromStack = @@ -691,12 +713,9 @@ bufferization::getBufferType(Value value, const BufferizationOptions &options, return bufferizableOp.getBufferType(value, options, invocationStack); // Op is not bufferizable. - auto memSpace = - options.defaultMemorySpaceFn(cast(value.getType())); - if (!memSpace.has_value()) - return op->emitError("could not infer memory space"); - - return getMemRefType(value, options, /*layout=*/{}, *memSpace); + return cast(value.getType()).getBufferType(options, [&]() { + return op->emitError(); + }); } bool bufferization::hasTensorSemantics(Operation *op) { @@ -716,11 +735,11 @@ void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter, SmallVector replacements; for (OpResult opResult : op->getOpResults()) { Value replacement = values[opResult.getResultNumber()]; - if (llvm::isa(opResult.getType())) { + if (llvm::isa(opResult.getType())) { // The OpResult is a tensor. Such values are replaced with memrefs during // bufferization. - assert(llvm::isa(replacement.getType()) && - "tensor op result should be replaced with a memref value"); + assert(llvm::isa(replacement.getType()) && + "tensor op result should be replaced with a buffer value"); // The existing uses of the OpResult still expect a tensor. Insert a // ToTensorOp. Throughout bufferization, this ToTensorOp will gradually // loose all of its users and eventually DCE away. @@ -768,12 +787,10 @@ LogicalResult BufferizationOptions::createMemCpy(OpBuilder &b, Location loc, // Bufferization-specific IRMapping support with debugging. //===----------------------------------------------------------------------===// -BaseMemRefType bufferization::getMemRefType(Value value, +BaseMemRefType bufferization::getMemRefType(TensorType tensorType, const BufferizationOptions &options, MemRefLayoutAttrInterface layout, Attribute memorySpace) { - auto tensorType = llvm::cast(value.getType()); - // Case 1: Unranked memref type. 
if (auto unrankedTensorType = llvm::dyn_cast(tensorType)) { @@ -790,7 +807,7 @@ BaseMemRefType bufferization::getMemRefType(Value value, memorySpace); } - return options.unknownTypeConverterFn(value, memorySpace, options); + return options.unknownTypeConverterFn(tensorType, memorySpace, options); } BaseMemRefType @@ -921,14 +938,17 @@ AliasingOpOperandList bufferization::detail::defaultGetAliasingOpOperands( return AliasingOpOperandList(std::move(result)); } -FailureOr bufferization::detail::defaultGetBufferType( +FailureOr bufferization::detail::defaultGetBufferType( Value value, const BufferizationOptions &options, SmallVector &invocationStack) { assert(llvm::isa(value.getType()) && "expected tensor type"); + auto tensorType = cast(value.getType()); // No further analysis is possible for a block argument. - if (llvm::isa(value)) - return bufferization::getMemRefType(value, options); + if (llvm::isa(value)) { + return cast( + bufferization::getMemRefType(tensorType, options)); + } // Value is an OpResult. Operation *op = getOwnerOfValue(value); @@ -950,7 +970,8 @@ FailureOr bufferization::detail::defaultGetBufferType( if (!memSpace.has_value()) return op->emitError("could not infer memory space"); - return getMemRefType(value, options, /*layout=*/{}, *memSpace); + return cast( + getMemRefType(tensorType, options, /*layout=*/{}, *memSpace)); } bool bufferization::detail::defaultIsRepetitiveRegion( @@ -1000,7 +1021,7 @@ bufferization::detail::unknownGetAliasingValues(OpOperand &opOperand) { } bool bufferization::detail::defaultHasTensorSemantics(Operation *op) { - auto isaTensor = [](Type t) { return isa(t); }; + auto isaTensor = [](Type t) { return isa(t); }; bool hasTensorBlockArgument = any_of(op->getRegions(), [&](Region &r) { return any_of(r.getBlocks(), [&](Block &b) { return any_of(b.getArguments(), [&](BlockArgument bbArg) { @@ -1015,3 +1036,19 @@ bool bufferization::detail::defaultHasTensorSemantics(Operation *op) { return true; return any_of(op->getOperandTypes(), isaTensor); } + +FailureOr +bufferization::detail::asMemRefType(FailureOr bufferType) { + if (failed(bufferType)) + return failure(); + return cast(*bufferType); +} + +bool bufferization::detail::typesMatchAfterBufferization(Operation &op, + Value tensor, + Value buffer) { + return mlir::succeeded( + cast(tensor.getType()) + .verifyCompatibleBufferType(cast(buffer.getType()), + [&]() { return op.emitError(); })); +} diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationDialect.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationDialect.cpp index e5a0c3c45b09..dc5c15877271 100644 --- a/mlir/lib/Dialect/Bufferization/IR/BufferizationDialect.cpp +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationDialect.cpp @@ -9,8 +9,10 @@ #include "mlir/Dialect/Affine/IR/AffineOps.h" #include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h" #include "mlir/Dialect/Bufferization/IR/Bufferization.h" +#include "mlir/Dialect/Bufferization/IR/BufferizationTypeInterfaces.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/Dialect/Tensor/IR/Tensor.h" +#include "mlir/IR/BuiltinTypes.h" #include "mlir/Interfaces/FunctionInterfaces.h" #include "mlir/Transforms/InliningUtils.h" @@ -51,6 +53,46 @@ struct BufferizationInlinerInterface : public DialectInlinerInterface { return true; } }; + +template +struct BuiltinTensorExternalModel + : TensorLikeType::ExternalModel, + Tensor> { + llvm::FailureOr getBufferType( + mlir::Type tensor, const BufferizationOptions &options, + llvm::function_ref emitError) const { + auto tensorType 
= cast(tensor); + auto memSpace = options.defaultMemorySpaceFn(tensorType); + if (!memSpace.has_value()) + return emitError() << "could not infer memory space"; + + return cast( + getMemRefType(tensorType, options, /*layout=*/{}, *memSpace)); + } + + mlir::LogicalResult verifyCompatibleBufferType( + mlir::Type tensor, BufferLikeType bufferType, + llvm::function_ref emitError) const { + assert(isa(tensor) && "expected tensor type"); + assert(isa(bufferType) && "expected memref type"); + + auto tensorType = cast(tensor); + auto memrefType = cast(bufferType); + + if (tensorType.getShape() != memrefType.getShape()) + return emitError() << "shapes do not match"; + + if (tensorType.getElementType() != memrefType.getElementType()) + return emitError() << "element types do not match"; + + return mlir::success(); + } +}; + +template +struct BuiltinMemRefExternalModel + : BufferLikeType::ExternalModel, + MemRef> {}; } // namespace //===----------------------------------------------------------------------===// @@ -63,6 +105,20 @@ void mlir::bufferization::BufferizationDialect::initialize() { #include "mlir/Dialect/Bufferization/IR/BufferizationOps.cpp.inc" >(); addInterfaces(); + + // Note: Unlike with other external models, declaring bufferization's + // "promised interfaces" in builtins for TensorLike and BufferLike type + // interfaces is not possible (due to builtins being independent of + // bufferization). Thus, the compromise is to attach these interfaces directly + // during dialect initialization. + RankedTensorType::attachInterface< + BuiltinTensorExternalModel>(*getContext()); + UnrankedTensorType::attachInterface< + BuiltinTensorExternalModel>(*getContext()); + MemRefType::attachInterface>( + *getContext()); + UnrankedMemRefType::attachInterface< + BuiltinMemRefExternalModel>(*getContext()); } LogicalResult BufferizationDialect::verifyRegionArgAttribute( diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp index 1ee5345cff41..0f678abd3d37 100644 --- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp @@ -14,7 +14,6 @@ #include "mlir/Dialect/SparseTensor/IR/SparseTensor.h" #include "mlir/Dialect/Tensor/IR/Tensor.h" #include "mlir/IR/Matchers.h" -#include "mlir/IR/BuiltinTypeInterfaces.h" #include using namespace mlir; @@ -84,21 +83,21 @@ FailureOr mlir::bufferization::castOrReallocMemRefValue( return copy; } -/// Try to fold to_memref(to_tensor(x)). If x's type and the result type of the -/// to_memref op are different, a memref.cast is needed. -LogicalResult mlir::bufferization::foldToMemrefToTensorPair( - RewriterBase &rewriter, ToMemrefOp toMemref, +/// Try to fold to_buffer(to_tensor(x)). If x's type and the result type of the +/// to_buffer op are different, a memref.cast is needed. +LogicalResult mlir::bufferization::foldToBufferToTensorPair( + RewriterBase &rewriter, ToBufferOp toBuffer, const BufferizationOptions &options) { - auto memrefToTensor = toMemref.getTensor().getDefiningOp(); - if (!memrefToTensor) + auto bufferToTensor = toBuffer.getTensor().getDefiningOp(); + if (!bufferToTensor) return failure(); - Type srcType = memrefToTensor.getMemref().getType(); - Type destType = toMemref.getType(); + Type srcType = bufferToTensor.getBuffer().getType(); + Type destType = toBuffer.getType(); // Directly rewrite if the type did not change. 
if (srcType == destType) { - rewriter.replaceOp(toMemref, memrefToTensor.getMemref()); + rewriter.replaceOp(toBuffer, bufferToTensor.getBuffer()); return success(); } @@ -109,11 +108,11 @@ LogicalResult mlir::bufferization::foldToMemrefToTensorPair( // Ranked memref -> Ranked memref cast. if (rankedSrcType && rankedDestType) { FailureOr replacement = castOrReallocMemRefValue( - rewriter, memrefToTensor.getMemref(), rankedDestType, options); + rewriter, bufferToTensor.getBuffer(), rankedDestType, options); if (failed(replacement)) return failure(); - rewriter.replaceOp(toMemref, *replacement); + rewriter.replaceOp(toBuffer, *replacement); return success(); } @@ -126,8 +125,8 @@ LogicalResult mlir::bufferization::foldToMemrefToTensorPair( // Ranked memref -> unranked memref cast: No copy needed. assert(memref::CastOp::areCastCompatible(srcType, destType) && "expected that types are cast compatible"); - rewriter.replaceOpWithNewOp(toMemref, destType, - memrefToTensor.getMemref()); + rewriter.replaceOpWithNewOp(toBuffer, destType, + bufferToTensor.getBuffer()); return success(); } @@ -223,7 +222,7 @@ AliasingValueList AllocTensorOp::getAliasingValues(OpOperand &opOperand, return {}; } -FailureOr +FailureOr AllocTensorOp::getBufferType(Value value, const BufferizationOptions &options, SmallVector &invocationStack) { assert(value == getResult() && "invalid value"); @@ -233,8 +232,8 @@ AllocTensorOp::getBufferType(Value value, const BufferizationOptions &options, if (getMemorySpace().has_value()) { memorySpace = *getMemorySpace(); } else if (getCopy()) { - auto copyBufferType = - bufferization::getBufferType(getCopy(), options, invocationStack); + auto copyBufferType = bufferization::detail::asMemRefType( + bufferization::getBufferType(getCopy(), options, invocationStack)); if (failed(copyBufferType)) return failure(); memorySpace = copyBufferType->getMemorySpace(); @@ -244,7 +243,8 @@ AllocTensorOp::getBufferType(Value value, const BufferizationOptions &options, return getOperation()->emitError("could not infer memory space"); } - return getMemRefTypeWithStaticIdentityLayout(getType(), memorySpace); + return cast( + getMemRefTypeWithStaticIdentityLayout(getType(), memorySpace)); } LogicalResult AllocTensorOp::verify() { @@ -639,8 +639,9 @@ Value MaterializeInDestinationOp::buildSubsetExtraction(OpBuilder &builder, assert(getRestrict() && "expected that ops with memrefs dest have 'restrict'"); setRestrict(false); - return builder.create(loc, getDest(), /*restrict=*/true, - getWritable()); + return builder.create( + loc, memref::getTensorTypeFromMemRefType(getDest().getType()), getDest(), + /*restrict=*/true, getWritable()); } bool MaterializeInDestinationOp::isEquivalentSubset( @@ -741,12 +742,12 @@ bool ToTensorOp::isWritable(Value value, const AnalysisState &state) { } OpFoldResult ToTensorOp::fold(FoldAdaptor) { - if (auto toMemref = getMemref().getDefiningOp()) + if (auto toBuffer = getBuffer().getDefiningOp()) // Approximate alias analysis by conservatively folding only when no there // is no interleaved operation. 
- if (toMemref->getBlock() == this->getOperation()->getBlock() && - toMemref->getNextNode() == this->getOperation()) - return toMemref.getTensor(); + if (toBuffer->getBlock() == this->getOperation()->getBlock() && + toBuffer->getNextNode() == this->getOperation()) + return toBuffer.getTensor(); return {}; } @@ -761,7 +762,7 @@ struct DimOfToTensorFolder : public OpRewritePattern { return failure(); rewriter.replaceOpWithNewOp( - dimOp, memrefToTensorOp.getMemref(), dimOp.getIndex()); + dimOp, memrefToTensorOp.getBuffer(), dimOp.getIndex()); return success(); } }; @@ -773,26 +774,26 @@ void ToTensorOp::getCanonicalizationPatterns(RewritePatternSet &results, } //===----------------------------------------------------------------------===// -// ToMemrefOp +// ToBufferOp //===----------------------------------------------------------------------===// -OpFoldResult ToMemrefOp::fold(FoldAdaptor) { +OpFoldResult ToBufferOp::fold(FoldAdaptor) { if (auto memrefToTensor = getTensor().getDefiningOp()) - if (memrefToTensor.getMemref().getType() == getType()) - return memrefToTensor.getMemref(); + if (memrefToTensor.getBuffer().getType() == getType()) + return memrefToTensor.getBuffer(); return {}; } namespace { -/// Replace tensor.cast + to_memref by to_memref + memref.cast. -struct ToMemrefOfCast : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; +/// Replace tensor.cast + to_buffer by to_buffer + memref.cast. +struct ToBufferOfCast : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; - LogicalResult matchAndRewrite(ToMemrefOp toMemref, + LogicalResult matchAndRewrite(ToBufferOp toBuffer, PatternRewriter &rewriter) const final { auto tensorCastOperand = - toMemref.getOperand().getDefiningOp(); + toBuffer.getOperand().getDefiningOp(); if (!tensorCastOperand) return failure(); auto srcTensorType = llvm::dyn_cast( @@ -801,51 +802,51 @@ struct ToMemrefOfCast : public OpRewritePattern { return failure(); auto memrefType = MemRefType::get(srcTensorType.getShape(), srcTensorType.getElementType()); - Value memref = rewriter.create(toMemref.getLoc(), memrefType, + Value memref = rewriter.create(toBuffer.getLoc(), memrefType, tensorCastOperand.getOperand()); - rewriter.replaceOpWithNewOp(toMemref, toMemref.getType(), + rewriter.replaceOpWithNewOp(toBuffer, toBuffer.getType(), memref); return success(); } }; -/// Canonicalize bufferization.to_tensor + bufferization.to_memref. Insert a +/// Canonicalize bufferization.to_tensor + bufferization.to_buffer. Insert a /// cast if necessary. -struct ToMemrefToTensorFolding : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; +struct ToBufferToTensorFolding : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; - LogicalResult matchAndRewrite(ToMemrefOp toMemref, + LogicalResult matchAndRewrite(ToBufferOp toBuffer, PatternRewriter &rewriter) const final { BufferizationOptions options; options.bufferAlignment = 0; - return foldToMemrefToTensorPair(rewriter, toMemref, options); + return foldToBufferToTensorPair(rewriter, toBuffer, options); } }; -/// Fold a load on a to_memref operation into an tensor.extract on the +/// Fold a load on a to_buffer operation into an tensor.extract on the /// corresponding tensor. 
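The pattern defined just below rewrites a load through a `to_buffer` staging value into a direct read of the tensor; roughly as follows (a sketch using the single-result-type assembly form shown in the updated Bufferization docs):

```mlir
// Before: the load goes through the buffer produced by to_buffer.
%m = bufferization.to_buffer %t : memref<8xf32>
%v = memref.load %m[%i] : memref<8xf32>

// After LoadOfToBuffer: read the original tensor directly.
%v2 = tensor.extract %t[%i] : tensor<8xf32>
```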
-struct LoadOfToMemref : public OpRewritePattern { +struct LoadOfToBuffer : public OpRewritePattern { using OpRewritePattern::OpRewritePattern; LogicalResult matchAndRewrite(memref::LoadOp load, PatternRewriter &rewriter) const override { - auto toMemref = load.getMemref().getDefiningOp(); - if (!toMemref) + auto toBuffer = load.getMemref().getDefiningOp(); + if (!toBuffer) return failure(); - rewriter.replaceOpWithNewOp(load, toMemref.getTensor(), + rewriter.replaceOpWithNewOp(load, toBuffer.getTensor(), load.getIndices()); return success(); } }; -/// Fold dim of a to_memref into the dim of the tensor. +/// Fold dim of a to_buffer into the dim of the tensor. struct DimOfCastOp : public OpRewritePattern { using OpRewritePattern::OpRewritePattern; LogicalResult matchAndRewrite(memref::DimOp dimOp, PatternRewriter &rewriter) const override { - auto castOp = dimOp.getSource().getDefiningOp(); + auto castOp = dimOp.getSource().getDefiningOp(); if (!castOp) return failure(); Value newSource = castOp.getOperand(); @@ -857,16 +858,16 @@ struct DimOfCastOp : public OpRewritePattern { } // namespace -void ToMemrefOp::getCanonicalizationPatterns(RewritePatternSet &results, +void ToBufferOp::getCanonicalizationPatterns(RewritePatternSet &results, MLIRContext *context) { - results.add(context); + results.add(context); } -LogicalResult ToMemrefOp::bufferize(RewriterBase &rewriter, +LogicalResult ToBufferOp::bufferize(RewriterBase &rewriter, const BufferizationOptions &options) { - // Fold to_memref(to_tensor(x)) to x. Insert a cast if necessary. - (void)foldToMemrefToTensorPair(rewriter, *this, options); + // Fold to_buffer(to_tensor(x)) to x. Insert a cast if necessary. + (void)foldToBufferToTensorPair(rewriter, *this, options); // Note: The return value of `bufferize` indicates whether there was an error // or not. (And not whether the pattern matched or not.) return success(); diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationTypeInterfaces.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationTypeInterfaces.cpp new file mode 100644 index 000000000000..0e973915c6fc --- /dev/null +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationTypeInterfaces.cpp @@ -0,0 +1,21 @@ +//===- BufferizationTypeInterfaces.cpp - Type Interfaces --------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "mlir/Dialect/Bufferization/IR/BufferizationTypeInterfaces.h" + +//===----------------------------------------------------------------------===// +// Bufferization Type Interfaces +//===----------------------------------------------------------------------===// + +namespace mlir { +namespace bufferization { + +#include "mlir/Dialect/Bufferization/IR/BufferizationTypeInterfaces.cpp.inc" + +} // namespace bufferization +} // namespace mlir diff --git a/mlir/lib/Dialect/Bufferization/IR/CMakeLists.txt b/mlir/lib/Dialect/Bufferization/IR/CMakeLists.txt index 63dcc1eb233e..5d8f0060f2c3 100644 --- a/mlir/lib/Dialect/Bufferization/IR/CMakeLists.txt +++ b/mlir/lib/Dialect/Bufferization/IR/CMakeLists.txt @@ -6,6 +6,7 @@ add_mlir_dialect_library(MLIRBufferizationDialect BufferizationDialect.cpp BufferViewFlowOpInterface.cpp UnstructuredControlFlow.cpp + BufferizationTypeInterfaces.cpp ADDITIONAL_HEADER_DIRS ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/Bufferization diff --git a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp index 313dbbd16984..7753c8cc93d8 100644 --- a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp +++ b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp @@ -70,11 +70,6 @@ struct OneShotBufferizePass explicit OneShotBufferizePass(const OneShotBufferizationOptions &options) : options(options) {} - void getDependentDialects(DialectRegistry ®istry) const override { - registry - .insert(); - } - void runOnOperation() override { OneShotBufferizationOptions opt; if (!options) { @@ -129,9 +124,9 @@ struct OneShotBufferizePass "'unknown-type-conversion'"); return signalPassFailure(); } - opt.unknownTypeConverterFn = [=](Value value, Attribute memorySpace, + opt.unknownTypeConverterFn = [=](TensorType tensorType, + Attribute memorySpace, const BufferizationOptions &options) { - auto tensorType = cast(value.getType()); if (unknownTypeConversionOption == LayoutMapOption::IdentityLayoutMap) return bufferization::getMemRefTypeWithStaticIdentityLayout( tensorType, memorySpace); @@ -230,11 +225,11 @@ namespace { class BufferizationRewriter : public IRRewriter, public RewriterBase::Listener { public: BufferizationRewriter(MLIRContext *ctx, DenseSet &erasedOps, - DenseSet &toMemrefOps, + DenseSet &toBufferOps, SmallVector &worklist, const BufferizationOptions &options, BufferizationStatistics *statistics) - : IRRewriter(ctx), erasedOps(erasedOps), toMemrefOps(toMemrefOps), + : IRRewriter(ctx), erasedOps(erasedOps), toBufferOps(toBufferOps), worklist(worklist), analysisState(options), statistics(statistics) { setListener(this); } @@ -243,7 +238,7 @@ class BufferizationRewriter : public IRRewriter, public RewriterBase::Listener { void notifyOperationErased(Operation *op) override { erasedOps.insert(op); // Erase if present. - toMemrefOps.erase(op); + toBufferOps.erase(op); } void notifyOperationInserted(Operation *op, InsertPoint previous) override { @@ -260,9 +255,9 @@ class BufferizationRewriter : public IRRewriter, public RewriterBase::Listener { sideEffectingOp.hasEffect()); } - // Keep track of to_memref ops. - if (isa(op)) { - toMemrefOps.insert(op); + // Keep track of to_buffer ops. + if (isa(op)) { + toBufferOps.insert(op); return; } @@ -287,8 +282,8 @@ class BufferizationRewriter : public IRRewriter, public RewriterBase::Listener { /// A set of all erased ops. 
DenseSet &erasedOps; - /// A set of all to_memref ops. - DenseSet &toMemrefOps; + /// A set of all to_buffer ops. + DenseSet &toBufferOps; /// The worklist of ops to be bufferized. SmallVector &worklist; @@ -311,9 +306,9 @@ LogicalResult bufferization::bufferizeOp(Operation *op, return failure(); } - // Keep track of to_memref ops. - DenseSet toMemrefOps; - op->walk([&](ToMemrefOp toMemrefOp) { toMemrefOps.insert(toMemrefOp); }); + // Keep track of to_buffer ops. + DenseSet toBufferOps; + op->walk([&](ToBufferOp toBufferOp) { toBufferOps.insert(toBufferOp); }); // Gather all bufferizable ops in top-to-bottom order. // @@ -332,7 +327,7 @@ LogicalResult bufferization::bufferizeOp(Operation *op, DenseSet erasedOps; // Bufferize all ops. - BufferizationRewriter rewriter(op->getContext(), erasedOps, toMemrefOps, + BufferizationRewriter rewriter(op->getContext(), erasedOps, toBufferOps, worklist, options, statistics); for (unsigned i = 0; i < worklist.size(); ++i) { Operation *nextOp = worklist[i]; @@ -375,11 +370,11 @@ LogicalResult bufferization::bufferizeOp(Operation *op, if (erasedOps.contains(op)) return success(); - // Fold all to_memref(to_tensor(x)) pairs. - for (Operation *op : toMemrefOps) { + // Fold all to_buffer(to_tensor(x)) pairs. + for (Operation *op : toBufferOps) { rewriter.setInsertionPoint(op); - (void)bufferization::foldToMemrefToTensorPair( - rewriter, cast(op), options); + (void)bufferization::foldToBufferToTensorPair( + rewriter, cast(op), options); } // Remove all dead to_tensor ops. @@ -410,8 +405,8 @@ LogicalResult bufferization::bufferizeOp(Operation *op, // Ops without any uses and no side effects will fold away. if (op->getUses().empty() && isMemoryEffectFree(op)) continue; - // ToTensorOps/ToMemrefOps are allowed in the output. - if (isa(op)) + // ToTensorOps/ToBufferOps are allowed in the output. + if (isa(op)) continue; return op->emitError("op was not bufferized"); } @@ -430,17 +425,17 @@ bufferization::bufferizeBlockSignature(Block *block, RewriterBase &rewriter, // Compute the new signature. SmallVector newTypes; for (BlockArgument &bbArg : block->getArguments()) { - auto tensorType = dyn_cast(bbArg.getType()); + auto tensorType = dyn_cast(bbArg.getType()); if (!tensorType) { newTypes.push_back(bbArg.getType()); continue; } - FailureOr memrefType = + FailureOr bufferType = bufferization::getBufferType(bbArg, options); - if (failed(memrefType)) + if (failed(bufferType)) return failure(); - newTypes.push_back(*memrefType); + newTypes.push_back(*bufferType); } // Change the type of all block arguments. @@ -487,12 +482,12 @@ bufferization::bufferizeBlockSignature(Block *block, RewriterBase &rewriter, newOperands.push_back(operand); continue; } - FailureOr operandBufferType = + FailureOr operandBufferType = bufferization::getBufferType(operand, options); if (failed(operandBufferType)) return failure(); rewriter.setInsertionPointAfterValue(operand); - Value bufferizedOperand = rewriter.create( + Value bufferizedOperand = rewriter.create( operand.getLoc(), *operandBufferType, operand); // A cast is needed if the operand and the block argument have different // bufferized types. 
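Downstream users of `unknownTypeConverterFn` follow the same update as the pass above: the callback now receives the `TensorType` directly instead of a `Value`. A sketch of a custom converter under the new signature (the option setup is illustrative; only the callback signature is from this patch):

```c++
bufferization::OneShotBufferizationOptions options;
options.unknownTypeConverterFn = [](TensorType tensorType,
                                    Attribute memorySpace,
                                    const BufferizationOptions &opts) {
  // No more value.getType() cast; use an identity layout, as the sparsifier
  // pipeline further below does.
  return bufferization::getMemRefTypeWithStaticIdentityLayout(tensorType,
                                                              memorySpace);
};
```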
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp index c45678f1e4b4..5027ad9e3525 100644 --- a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp @@ -50,29 +50,47 @@ void FuncAnalysisState::startFunctionAnalysis(FuncOp funcOp) { #endif // NDEBUG } +// Note: this is a local adaptor to unify TensorType and TensorLikeType code +// paths that both work with BufferizationOptions. +static mlir::Attribute +getDefaultMemorySpace(const BufferizationOptions &options, + TensorLikeType type) { + if (auto tensorType = dyn_cast(type)) { + return *options.defaultMemorySpaceFn(tensorType); + } + return nullptr; +} + /// Return the index-th bufferized function argument type. This assumes that the /// specified argument is a tensor. If the tensor is ranked, a layout map may be /// specified by the user (as per `options.functionArgTypeConverterFn`). -static BaseMemRefType +static BufferLikeType getBufferizedFunctionArgType(FuncOp funcOp, int64_t index, const BufferizationOptions &options) { - auto tensorType = - dyn_cast(funcOp.getFunctionType().getInput(index)); - assert(tensorType && "expected TensorType"); - - BaseMemRefType memrefType = options.functionArgTypeConverterFn( - tensorType, *options.defaultMemorySpaceFn(tensorType), funcOp, options); - - auto layoutAttr = funcOp.getArgAttrOfType( - index, BufferizationDialect::kBufferLayoutAttrName); - if (!layoutAttr) - return memrefType; - - auto rankedMemrefType = dyn_cast(memrefType); - assert(rankedMemrefType && "buffer layout not supported on unranked tensors"); - return MemRefType::get( - rankedMemrefType.getShape(), rankedMemrefType.getElementType(), - layoutAttr.getValue(), rankedMemrefType.getMemorySpace()); + auto type = + dyn_cast(funcOp.getFunctionType().getInput(index)); + assert(type && "expected TensorLikeType"); + + // Note: For builtin tensors there is additional logic related to layout. + if (auto tensorType = dyn_cast(type)) { + BufferLikeType memrefType = options.functionArgTypeConverterFn( + type, *options.defaultMemorySpaceFn(tensorType), funcOp, options); + + auto layoutAttr = funcOp.getArgAttrOfType( + index, BufferizationDialect::kBufferLayoutAttrName); + if (!layoutAttr) + return memrefType; + + auto rankedMemrefType = dyn_cast(memrefType); + assert(rankedMemrefType && + "buffer layout not supported on unranked tensors"); + return cast(MemRefType::get( + rankedMemrefType.getShape(), rankedMemrefType.getElementType(), + layoutAttr, rankedMemrefType.getMemorySpace())); + } + + return options.functionArgTypeConverterFn(type, /*memSpace=*/nullptr, funcOp, + options); } /// Return the FuncOp called by `callOp`. @@ -195,7 +213,7 @@ struct CallOpInterface return result; } - FailureOr + FailureOr getBufferType(Operation *op, Value value, const BufferizationOptions &options, SmallVector &invocationStack) const { auto callOp = cast(op); @@ -207,13 +225,14 @@ struct CallOpInterface FunctionType funcType = funcOp.getFunctionType(); Type resultType = funcType.getResult(cast(value).getResultNumber()); - if (auto bufferizedType = dyn_cast(resultType)) + if (auto bufferizedType = dyn_cast(resultType)) return bufferizedType; // Otherwise, call the type converter to compute the bufferized type. 
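For function boundaries, the converter callback now operates on `TensorLikeType` and returns `BufferLikeType`. A custom converter under the new signature might look like this (a sketch mirroring the default installed by `setFunctionBoundaryTypeConversion` above; not part of the change itself):

```c++
options.functionArgTypeConverterFn =
    [](TensorLikeType type, Attribute memorySpace, func::FuncOp funcOp,
       const BufferizationOptions &opts) -> BufferLikeType {
  // Builtin tensors keep the pre-existing behavior: fully dynamic layout.
  if (auto tensorType = dyn_cast<TensorType>(type))
    return cast<BufferLikeType>(
        bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType,
                                                           memorySpace));
  // Non-builtin tensor-like types provide their own buffer type.
  auto bufferType =
      type.getBufferType(opts, [&]() { return funcOp->emitError(); });
  assert(succeeded(bufferType) && "expected a valid buffer type");
  return *bufferType;
};
```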
- auto tensorType = cast(resultType); - return options.functionArgTypeConverterFn( - tensorType, *options.defaultMemorySpaceFn(tensorType), funcOp, options); + auto tensorType = cast(resultType); + return cast(options.functionArgTypeConverterFn( + tensorType, getDefaultMemorySpace(options, tensorType), funcOp, + options)); } /// All function arguments are writable. It is the responsibility of the @@ -226,14 +245,14 @@ struct CallOpInterface SmallVector resultTypes; for (Value result : callOp.getResults()) { Type returnType = result.getType(); - if (!isa(returnType)) { + if (!isa(returnType)) { // Non-tensor values are returned. resultTypes.push_back(returnType); continue; } // Returning a memref. - FailureOr resultType = + FailureOr resultType = bufferization::getBufferType(result, options); if (failed(resultType)) return failure(); @@ -249,7 +268,7 @@ struct CallOpInterface for (OpOperand &opOperand : callOp->getOpOperands()) { // Non-tensor operands are just copied. - if (!isa(opOperand.get().getType())) { + if (!isa(opOperand.get().getType())) { newOperands.push_back(opOperand.get()); continue; } @@ -262,27 +281,27 @@ struct CallOpInterface Value buffer = *maybeBuffer; // Caller / callee type mismatch is handled with castOrReallocMemRefValue. - auto memRefType = funcType.getInput(opOperand.getOperandNumber()); - if (!isa(memRefType)) { + auto bufferType = funcType.getInput(opOperand.getOperandNumber()); + if (!isa(bufferType)) { // The called function was not bufferized yet. This can happen when // there cycles in the function call graph. Compute the bufferized // result type. - FailureOr maybeMemRefType = + FailureOr maybeBufferType = bufferization::getBufferType( funcOp.getArgument(opOperand.getOperandNumber()), options); - if (failed(maybeMemRefType)) + if (failed(maybeBufferType)) return failure(); - memRefType = *maybeMemRefType; + bufferType = *maybeBufferType; } - // Since we don't yet have a clear layout story, to_memref may + // Since we don't yet have a clear layout story, to_buffer may // conservatively turn tensors into more dynamic memref than necessary. // If the memref type of the callee fails, introduce an extra memref.cast // that will either canonicalize away or fail compilation until we can do // something better. Insert a reallocation + copy if it cannot be // statically guaranteed that a direct cast would be valid. - if (buffer.getType() != memRefType) { - auto memrefDstType = dyn_cast(memRefType); + if (buffer.getType() != bufferType) { + auto memrefDstType = dyn_cast(bufferType); assert(memrefDstType && "buffer layout not supported on unranked tensors"); FailureOr replacement = bufferization::castOrReallocMemRefValue( @@ -344,7 +363,7 @@ struct FuncOpInterface static bool supportsUnstructuredControlFlow() { return true; } bool hasTensorSemantics(Operation *op) const { - auto isaTensor = llvm::IsaPred; + auto isaTensor = llvm::IsaPred; // A function has tensor semantics if it has tensor arguments/results. 
auto funcOp = cast(op); @@ -371,7 +390,7 @@ struct FuncOpInterface return getAliasingBranchOpOperands(op, cast(value), state); } - FailureOr + FailureOr getBufferType(Operation *op, Value value, const BufferizationOptions &options, SmallVector &invocationStack) const { auto funcOp = cast(op); @@ -402,7 +421,7 @@ struct FuncOpInterface SmallVector argTypes; for (const auto &it : llvm::enumerate(funcType.getInputs())) { Type argType = it.value(); - if (isa(argType)) { + if (isa(argType)) { argTypes.push_back( getBufferizedFunctionArgType(funcOp, it.index(), options)); continue; @@ -413,9 +432,9 @@ struct FuncOpInterface // Compute the result types. SmallVector retTypes; for (Type resultType : funcType.getResults()) { - if (auto tensorType = dyn_cast(resultType)) { - BaseMemRefType resultType = options.functionArgTypeConverterFn( - tensorType, *options.defaultMemorySpaceFn(tensorType), funcOp, + if (auto tensorType = dyn_cast(resultType)) { + BufferLikeType resultType = options.functionArgTypeConverterFn( + tensorType, getDefaultMemorySpace(options, tensorType), funcOp, options); retTypes.push_back(resultType); continue; @@ -445,7 +464,7 @@ struct FuncOpInterface SmallVector returnValues; for (auto [returnVal, bufferizedType] : llvm::zip_equal(returnOp->getOperands(), retTypes)) { - auto tensorType = dyn_cast(returnVal.getType()); + auto tensorType = dyn_cast(returnVal.getType()); rewriter.setInsertionPoint(returnOp); // If not a tensor type just forward it. @@ -456,9 +475,9 @@ struct FuncOpInterface // Note: If `inferFunctionResultLayout = true`, casts are later folded // away. - Value toMemrefOp = rewriter.create( + Value toBufferOp = rewriter.create( returnOp.getLoc(), bufferizedType, returnVal); - returnValues.push_back(toMemrefOp); + returnValues.push_back(toBufferOp); } returnOp.getOperandsMutable().assign(returnValues); diff --git a/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp b/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp index fc1b221b4f03..53837a79e7d6 100644 --- a/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp +++ b/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp @@ -31,7 +31,7 @@ // Ops that do not implement `BufferizableOpInterface` can be analyzed but are // treated conservatively. E.g., the analysis has to assume that their tensor // OpOperands bufferize to memory writes. While such ops can be analyzed, they -// are not bufferized and remain in the IR. to_tensor and to_memref ops are +// are not bufferized and remain in the IR. to_tensor and to_buffer ops are // inserted at the bufferization boundary. 
// // This analysis caters to high-performance codegen where buffer reuse is deemed diff --git a/mlir/lib/Dialect/CMakeLists.txt b/mlir/lib/Dialect/CMakeLists.txt index 30d24df93a58..80b0ef068d96 100644 --- a/mlir/lib/Dialect/CMakeLists.txt +++ b/mlir/lib/Dialect/CMakeLists.txt @@ -42,9 +42,7 @@ add_subdirectory(UB) add_subdirectory(Utils) add_subdirectory(Vector) add_subdirectory(X86Vector) -if(MLIR_DIALECT_XEGPU_ENABLE) - add_subdirectory(XeGPU) -endif() +add_subdirectory(XeGPU) set(LLVM_OPTIONAL_SOURCES Traits.cpp diff --git a/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp b/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp index 6c1087730ebb..c17aa9906e56 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp @@ -75,19 +75,19 @@ static void createMemcpy(OpBuilder &b, Location loc, Value tensorSource, // TODO: Support custom memory space on source. // We do not know the layout map of the source yet, so use a fully dynamic // layout for best compatibility. - Value toMemref = b.create( + Value toBuffer = b.create( loc, bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType), tensorSource, /*readOnly=*/true); - b.create(loc, toMemref, memrefDest); + b.create(loc, toBuffer, memrefDest); } break; case linalg::BufferizeToAllocationOptions::MemcpyOp::LinalgCopy: { // TODO: Support custom memory space on source. // We do not know the layout map of the source yet, so use a fully dynamic // layout for best compatibility. - Value toMemref = b.create( + Value toBuffer = b.create( loc, bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType), tensorSource, /*readOnly=*/true); - b.create(loc, toMemref, memrefDest); + b.create(loc, toBuffer, memrefDest); } break; }; } @@ -252,7 +252,8 @@ Value linalg::bufferizeToAllocation( // Create bufferization.to_tensor with "restrict" and "writable". The returned // tensor is a new buffer allocation, so it does not alias with any buffer. Value toTensorOp = rewriter.create( - loc, alloc, /*restrict=*/true, /*writable=*/true); + loc, padOp.getResult().getType(), alloc, /*restrict=*/true, + /*writable=*/true); rewriter.replaceOp(padOp, toTensorOp); return alloc; } @@ -335,7 +336,8 @@ Value linalg::bufferizeToAllocation( // Create bufferization.to_tensor with "restrict" and "writable". The returned // tensor is a new buffer allocation, so it does not alias with any buffer. 
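The create call below produces IR along these lines (an illustrative sketch; the shapes and the exact `to_tensor` assembly form are assumptions):

```mlir
%alloc = memref.alloc(%d0) : memref<?xf32>
// "restrict": the result does not alias any other to_tensor result;
// "writable": the tensor may be written to, with writes landing in %alloc.
%padded = bufferization.to_tensor %alloc restrict writable : memref<?xf32>
```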
Value toTensorOp = rewriter.create( - loc, alloc, /*restrict=*/true, /*writable=*/true); + loc, allocTensorOp.getResult().getType(), alloc, /*restrict=*/true, + /*writable=*/true); rewriter.replaceOp(allocTensorOp, toTensorOp); return alloc; } @@ -559,7 +561,8 @@ Value linalg::bufferizeToAllocation( createMemcpy(rewriter, op->getLoc(), operand->get(), alloc, options); } rewriter.modifyOpInPlace(op, [&]() { - auto toTensorOp = rewriter.create(op->getLoc(), alloc); + auto toTensorOp = rewriter.create( + op->getLoc(), operand->get().getType(), alloc); operand->set(toTensorOp); if (options.bufferizeDestinationOnly) { rewriter.modifyOpInPlace(toTensorOp, [&]() { diff --git a/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp b/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp index c35a2cf35c2f..556922a64b09 100644 --- a/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp +++ b/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp @@ -153,7 +153,7 @@ DiagnosedSilenceableFailure transform::CreateAsyncGroupsOp::applyToOne( /// Returns true if the given type has the default memory space. static bool hasDefaultMemorySpace(BaseMemRefType type) { - return !type.getMemorySpace(); + return !type.getMemorySpace() || type.getMemorySpaceAsInt() == 0; } /// Returns true if the given type has the shared (workgroup) memory space. diff --git a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp index e9d7dc1b847c..d096b39b6472 100644 --- a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp @@ -106,7 +106,7 @@ struct ConditionOpInterface FailureOr maybeBuffer = getBuffer(rewriter, value, options); if (failed(maybeBuffer)) return failure(); - FailureOr resultType = bufferization::getBufferType( + FailureOr resultType = bufferization::getBufferType( whileOp.getAfterArguments()[it.index()], options); if (failed(resultType)) return failure(); @@ -270,7 +270,7 @@ struct IfOpInterface return success(); } - FailureOr + FailureOr getBufferType(Operation *op, Value value, const BufferizationOptions &options, SmallVector &invocationStack) const { auto ifOp = cast(op); @@ -287,8 +287,8 @@ struct IfOpInterface // True branch was already bufferized. thenBufferType = cast(thenValue.getType()); } else { - auto maybeBufferType = - bufferization::getBufferType(thenValue, options, invocationStack); + auto maybeBufferType = bufferization::detail::asMemRefType( + bufferization::getBufferType(thenValue, options, invocationStack)); if (failed(maybeBufferType)) return failure(); thenBufferType = *maybeBufferType; @@ -297,8 +297,8 @@ struct IfOpInterface // False branch was already bufferized. elseBufferType = cast(elseValue.getType()); } else { - auto maybeBufferType = - bufferization::getBufferType(elseValue, options, invocationStack); + auto maybeBufferType = bufferization::detail::asMemRefType( + bufferization::getBufferType(elseValue, options, invocationStack)); if (failed(maybeBufferType)) return failure(); elseBufferType = *maybeBufferType; @@ -306,15 +306,15 @@ struct IfOpInterface // Best case: Both branches have the exact same buffer type. if (thenBufferType == elseBufferType) - return thenBufferType; + return cast(thenBufferType); // Memory space mismatch. 
if (thenBufferType.getMemorySpace() != elseBufferType.getMemorySpace()) return op->emitError("inconsistent memory space on then/else branches"); // Layout maps are different: Promote to fully dynamic layout map. - return getMemRefTypeWithFullyDynamicLayout( - cast(opResult.getType()), thenBufferType.getMemorySpace()); + return cast(getMemRefTypeWithFullyDynamicLayout( + cast(opResult.getType()), thenBufferType.getMemorySpace())); } }; @@ -384,7 +384,7 @@ struct IndexSwitchOpInterface return success(); } - FailureOr + FailureOr getBufferType(Operation *op, Value value, const BufferizationOptions &options, SmallVector &invocationStack) const { auto switchOp = cast(op); @@ -400,9 +400,7 @@ struct IndexSwitchOpInterface return bufferType; auto maybeBufferType = bufferization::getBufferType(yieldedValue, options, invocationStack); - if (failed(maybeBufferType)) - return failure(); - return maybeBufferType; + return bufferization::detail::asMemRefType(maybeBufferType); }; // Compute buffer type of the default case. @@ -430,7 +428,7 @@ struct IndexSwitchOpInterface cast(value.getType()), bufferType.getMemorySpace()); } - return bufferType; + return cast(bufferType); } }; @@ -516,7 +514,7 @@ getBbArgReplacements(RewriterBase &rewriter, Block::BlockArgListType bbArgs, /// If both buffer types are equal, no casts are needed the computed buffer type /// can be used directly. Otherwise, the buffer types can only differ in their /// layout map and a cast must be inserted. -static FailureOr computeLoopRegionIterArgBufferType( +static FailureOr computeLoopRegionIterArgBufferType( Operation *loopOp, BlockArgument iterArg, Value initArg, Value yieldedValue, const BufferizationOptions &options, SmallVector &invocationStack) { // Determine the buffer type of the init_arg. @@ -540,10 +538,10 @@ static FailureOr computeLoopRegionIterArgBufferType( } // Compute the buffer type of the yielded value. - BaseMemRefType yieldedValueBufferType; + BufferLikeType yieldedValueBufferType; if (isa(yieldedValue.getType())) { // scf.yield was already bufferized. - yieldedValueBufferType = cast(yieldedValue.getType()); + yieldedValueBufferType = cast(yieldedValue.getType()); } else { // Note: This typically triggers a recursive call for the buffer type of // the iter_arg. @@ -576,8 +574,8 @@ static FailureOr computeLoopRegionIterArgBufferType( "expected same shape"); } #endif // NDEBUG - return getMemRefTypeWithFullyDynamicLayout( - iterTensorType, yieldedBufferType.getMemorySpace()); + return cast(getMemRefTypeWithFullyDynamicLayout( + iterTensorType, yieldedBufferType.getMemorySpace())); } /// Return `true` if the given loop may have 0 iterations. @@ -697,7 +695,7 @@ struct ForOpInterface return success(); } - FailureOr + FailureOr getBufferType(Operation *op, Value value, const BufferizationOptions &options, SmallVector &invocationStack) const { auto forOp = cast(op); @@ -1024,7 +1022,7 @@ struct WhileOpInterface return success(); } - FailureOr + FailureOr getBufferType(Operation *op, Value value, const BufferizationOptions &options, SmallVector &invocationStack) const { auto whileOp = cast(op); @@ -1057,7 +1055,7 @@ struct WhileOpInterface Value conditionYieldedVal = whileOp.getConditionOp().getArgs()[resultNum]; if (!isa(conditionYieldedVal.getType())) { // scf.condition was already bufferized. 
- return cast(conditionYieldedVal.getType()); + return cast(conditionYieldedVal.getType()); } return bufferization::getBufferType(conditionYieldedVal, options, invocationStack); @@ -1164,14 +1162,14 @@ struct YieldOpInterface // We may have to cast the value before yielding it. if (isa( yieldOp->getParentOp())) { - FailureOr resultType = bufferization::getBufferType( + FailureOr resultType = bufferization::getBufferType( yieldOp->getParentOp()->getResult(it.index()), options); if (failed(resultType)) return failure(); buffer = castBuffer(rewriter, buffer, *resultType); } else if (auto whileOp = dyn_cast(yieldOp->getParentOp())) { - FailureOr resultType = bufferization::getBufferType( + FailureOr resultType = bufferization::getBufferType( whileOp.getBeforeArguments()[it.index()], options); if (failed(resultType)) return failure(); @@ -1294,7 +1292,7 @@ struct ForallOpInterface return success(); } - FailureOr + FailureOr getBufferType(Operation *op, Value value, const BufferizationOptions &options, SmallVector &invocationStack) const { auto forallOp = cast(op); diff --git a/mlir/lib/Dialect/Shape/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Shape/Transforms/BufferizableOpInterfaceImpl.cpp index 66a2e4500178..6919b561f147 100644 --- a/mlir/lib/Dialect/Shape/Transforms/BufferizableOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Shape/Transforms/BufferizableOpInterfaceImpl.cpp @@ -66,7 +66,7 @@ struct AssumingOpInterface for (const auto &it : llvm::enumerate(assumingOp->getResultTypes())) { if (isa(it.value())) { newResults.push_back(rewriter.create( - assumingOp.getLoc(), newOp->getResult(it.index()))); + assumingOp.getLoc(), it.value(), newOp->getResult(it.index()))); } else { newResults.push_back(newOp->getResult(it.index())); } diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp index b7fac163ba5f..cc4f115e015e 100644 --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp @@ -212,7 +212,7 @@ static Value genTensorToMemref(PatternRewriter &rewriter, Location loc, auto tensorType = llvm::cast(tensor.getType()); auto memrefType = MemRefType::get(tensorType.getShape(), tensorType.getElementType()); - return rewriter.create(loc, memrefType, tensor); + return rewriter.create(loc, memrefType, tensor); } /// Prepares the outlined arguments, passing scalars and buffers in. Here we @@ -651,7 +651,7 @@ static LogicalResult rewriteSpMV(PatternRewriter &rewriter, tokens.clear(); // Done. - rewriter.replaceOpWithNewOp(op, memY); + rewriter.replaceOpWithNewOp(op, y.getType(), memY); return success(); } @@ -752,7 +752,7 @@ static LogicalResult rewriteSpMM(PatternRewriter &rewriter, tokens.clear(); // Done. - rewriter.replaceOpWithNewOp(op, bufC); + rewriter.replaceOpWithNewOp(op, c.getType(), bufC); return success(); } @@ -925,9 +925,12 @@ static LogicalResult rewriteSpGEMM(PatternRewriter &rewriter, tokens.clear(); // Done. 
- Value vt = rewriter.create(loc, valH); - Value rt = rewriter.create(loc, rowH); - Value ct = rewriter.create(loc, colH); + Value vt = rewriter.create( + loc, memref::getTensorTypeFromMemRefType(valH.getType()), valH); + Value rt = rewriter.create( + loc, memref::getTensorTypeFromMemRefType(rowH.getType()), rowH); + Value ct = rewriter.create( + loc, memref::getTensorTypeFromMemRefType(colH.getType()), colH); rewriter.replaceOpWithNewOp(op, c.getType(), ValueRange{rt, ct}, vt); return success(); @@ -1044,7 +1047,7 @@ static LogicalResult rewrite2To4SpMM(PatternRewriter &rewriter, tokens.clear(); // Done. - rewriter.replaceOpWithNewOp(op, bufC); + rewriter.replaceOpWithNewOp(op, C.getType(), bufC); return success(); } diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp index 20d46f7ca00c..5525427502d1 100644 --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp @@ -1478,7 +1478,8 @@ struct SparseDisassembleOpConverter // Converts MemRefs back to Tensors. SmallVector retValues = llvm::to_vector( llvm::map_range(retMem, [&rewriter, loc](Value v) -> Value { - return rewriter.create(loc, v); + return rewriter.create( + loc, memref::getTensorTypeFromMemRefType(v.getType()), v); })); // Appends the actual memory length used in each buffer returned. retValues.append(retLen.begin(), retLen.end()); diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp index 9ffa64dc821d..7f0b65768744 100644 --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp @@ -867,7 +867,9 @@ class SparseTensorDisassembleConverter // Converts MemRefs back to Tensors. 
     assert(retVal.size() + retLen.size() == op.getNumResults());
     for (unsigned i = 0, sz = retVal.size(); i < sz; i++) {
-      auto tensor = rewriter.create(loc, retVal[i]);
+      auto tensor = rewriter.create(
+          loc, memref::getTensorTypeFromMemRefType(retVal[i].getType()),
+          retVal[i]);
       retVal[i] = rewriter.create(loc, op.getResultTypes()[i], tensor);
     }
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparsificationAndBufferizationPass.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparsificationAndBufferizationPass.cpp
index 6e882a8d0ff3..3d951c27c3a6 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparsificationAndBufferizationPass.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparsificationAndBufferizationPass.cpp
@@ -218,10 +218,10 @@ mlir::getBufferizationOptionsForSparsification(bool analysisOnly) {
   OneShotBufferizationOptions options;
   options.bufferizeFunctionBoundaries = true;
   options.setFunctionBoundaryTypeConversion(LayoutMapOption::IdentityLayoutMap);
-  options.unknownTypeConverterFn = [](Value value, Attribute memorySpace,
+  options.unknownTypeConverterFn = [](TensorType tensorType,
+                                      Attribute memorySpace,
                                       const BufferizationOptions &options) {
-    return getMemRefTypeWithStaticIdentityLayout(
-        cast(value.getType()), memorySpace);
+    return getMemRefTypeWithStaticIdentityLayout(tensorType, memorySpace);
   };
   if (analysisOnly) {
     options.testAnalysisOnly = true;
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp
index f92382472b47..f08d80ad2697 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp
@@ -550,8 +550,8 @@ TypedValue sparse_tensor::genToMemref(OpBuilder &builder, Location loc,
                                       Value tensor) {
   auto tTp = llvm::cast(tensor.getType());
   auto mTp = MemRefType::get(tTp.getShape(), tTp.getElementType());
-  return builder.create(loc, mTp, tensor)
-      .getResult();
+  return cast>(
+      builder.create(loc, mTp, tensor).getResult());
 }
 
 Value sparse_tensor::createOrFoldSliceOffsetOp(OpBuilder &builder, Location loc,
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp
index ea5533dfc6ba..fcd39fa5f0ec 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp
@@ -263,7 +263,7 @@ void LoopEmitter::initializeLoopEmit(
       denseTp = bufferization::getMemRefTypeWithFullyDynamicLayout(rtp);
 
       Value denseVal =
-          builder.create(loc, denseTp, tensor);
+          builder.create(loc, denseTp, tensor);
       // Dense outputs need special handling.
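// Illustrative usage sketch of the callback signature change shown above
// (assumptions: OneShotBufferizationOptions lives in the One-Shot Analysis
// header and getMemRefTypeWithStaticIdentityLayout is the same helper used
// in the hunk above; the function name is made up). The unknown-type
// converter now receives the TensorType directly, so the callback no longer
// needs an SSA value to recover the type from.
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"

static mlir::bufferization::OneShotBufferizationOptions
makeIdentityLayoutOptions() {
  using namespace mlir;
  using namespace mlir::bufferization;
  OneShotBufferizationOptions options;
  options.unknownTypeConverterFn = [](TensorType tensorType,
                                      Attribute memorySpace,
                                      const BufferizationOptions &opts) {
    // Mirror the lambda above: work purely from the tensor type.
    return getMemRefTypeWithStaticIdentityLayout(tensorType, memorySpace);
  };
  return options;
}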
if (isOutput && updater) denseVal = updater(builder, loc, denseVal, tensor); diff --git a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp index dca3a8146195..715420bef8b1 100644 --- a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp @@ -49,12 +49,13 @@ struct CastOpInterface return {{op->getResult(0), BufferRelation::Equivalent}}; } - FailureOr + FailureOr getBufferType(Operation *op, Value value, const BufferizationOptions &options, SmallVector &invocationStack) const { auto castOp = cast(op); - auto maybeSrcBufferType = bufferization::getBufferType( - castOp.getSource(), options, invocationStack); + auto maybeSrcBufferType = + bufferization::detail::asMemRefType(bufferization::getBufferType( + castOp.getSource(), options, invocationStack)); if (failed(maybeSrcBufferType)) return failure(); Attribute memorySpace = maybeSrcBufferType->getMemorySpace(); @@ -66,18 +67,20 @@ struct CastOpInterface if (isa(castOp.getSource().getType())) { // When casting to a ranked tensor, we cannot infer any static offset or // strides from the source. Assume fully dynamic. - return getMemRefTypeWithFullyDynamicLayout(castOp.getType(), memorySpace); + return cast( + getMemRefTypeWithFullyDynamicLayout(castOp.getType(), memorySpace)); } // Case 2: Casting to an unranked tensor type if (isa(castOp.getType())) { - return getMemRefTypeWithFullyDynamicLayout(castOp.getType(), memorySpace); + return cast( + getMemRefTypeWithFullyDynamicLayout(castOp.getType(), memorySpace)); } // Case 3: Ranked tensor -> ranked tensor. The offsets and strides do not // change. auto rankedResultType = cast(castOp.getType()); - return cast(MemRefType::get( + return cast(MemRefType::get( rankedResultType.getShape(), rankedResultType.getElementType(), llvm::cast(*maybeSrcBufferType).getLayout(), memorySpace)); } @@ -138,7 +141,7 @@ struct CollapseShapeOpInterface return {{op->getOpResult(0), BufferRelation::Equivalent}}; } - FailureOr + FailureOr getBufferType(Operation *op, Value value, const BufferizationOptions &options, SmallVector &invocationStack) const { auto collapseShapeOp = cast(op); @@ -153,11 +156,12 @@ struct CollapseShapeOpInterface if (!canBeCollapsed) { // If dims cannot be collapsed, this op bufferizes to a new allocation. 
RankedTensorType tensorResultType = collapseShapeOp.getResultType(); - return bufferization::getMemRefTypeWithStaticIdentityLayout( - tensorResultType, srcBufferType.getMemorySpace()); + return cast( + bufferization::getMemRefTypeWithStaticIdentityLayout( + tensorResultType, srcBufferType.getMemorySpace())); } - return cast(memref::CollapseShapeOp::computeCollapsedType( + return cast(memref::CollapseShapeOp::computeCollapsedType( srcBufferType, collapseShapeOp.getReassociationIndices())); } @@ -215,7 +219,7 @@ struct CollapseShapeOpInterface MemRefType::get(collapseShapeOp.getSrcType().getShape(), collapseShapeOp.getSrcType().getElementType(), AffineMap(), bufferType.getMemorySpace()); - buffer = rewriter.create( + buffer = rewriter.create( op->getLoc(), memrefType, *tensorAlloc); } @@ -311,7 +315,7 @@ struct ExpandShapeOpInterface return {{op->getOpResult(0), BufferRelation::Equivalent}}; } - FailureOr + FailureOr getBufferType(Operation *op, Value value, const BufferizationOptions &options, SmallVector &invocationStack) const { auto expandShapeOp = cast(op); @@ -325,7 +329,7 @@ struct ExpandShapeOpInterface expandShapeOp.getReassociationIndices()); if (failed(maybeResultType)) return failure(); - return cast(*maybeResultType); + return cast(*maybeResultType); } LogicalResult bufferize(Operation *op, RewriterBase &rewriter, @@ -395,7 +399,7 @@ struct ExtractSliceOpInterface return success(); } - FailureOr + FailureOr getBufferType(Operation *op, Value value, const BufferizationOptions &options, SmallVector &invocationStack) const { auto extractSliceOp = cast(op); @@ -407,7 +411,7 @@ struct ExtractSliceOpInterface SmallVector mixedOffsets = extractSliceOp.getMixedOffsets(); SmallVector mixedSizes = extractSliceOp.getMixedSizes(); SmallVector mixedStrides = extractSliceOp.getMixedStrides(); - return cast(memref::SubViewOp::inferRankReducedResultType( + return cast(memref::SubViewOp::inferRankReducedResultType( extractSliceOp.getType().getShape(), llvm::cast(*srcMemrefType), mixedOffsets, mixedSizes, mixedStrides)); @@ -489,11 +493,11 @@ struct FromElementsOpInterface /*copy=*/false); if (failed(tensorAlloc)) return failure(); - FailureOr memrefType = + FailureOr memrefType = bufferization::getBufferType(*tensorAlloc, options); if (failed(memrefType)) return failure(); - Value buffer = rewriter.create( + Value buffer = rewriter.create( op->getLoc(), *memrefType, *tensorAlloc); // Case: tensor<0xelem_type>. @@ -738,19 +742,21 @@ struct PadOpInterface return {}; } - FailureOr + FailureOr getBufferType(Operation *op, Value value, const BufferizationOptions &options, SmallVector &invocationStack) const { // Infer memory space from the source tensor. 
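// Illustrative sketch of the getBufferType pattern applied throughout the
// interfaces above (assumptions: the new opaque return type is the
// bufferization dialect's buffer-like type interface, spelled BufferLikeType
// here, and the header below is where it is declared; the function name is
// made up). Interfaces now return the interface type and cast concrete
// MemRefTypes into it; callers that need a MemRefType narrow the result via
// bufferization::detail::asMemRefType as the hunks above do.
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h" // header assumed

static mlir::FailureOr<mlir::bufferization::BufferLikeType>
inferPaddedBufferType(mlir::RankedTensorType resultType,
                      mlir::Attribute memorySpace) {
  // Build the concrete memref type (null layout means identity), then wrap
  // it in the buffer-like interface type before returning.
  mlir::MemRefLayoutAttrInterface layout;
  return llvm::cast<mlir::bufferization::BufferLikeType>(
      mlir::MemRefType::get(resultType.getShape(),
                            resultType.getElementType(), layout, memorySpace));
}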
auto padOp = cast(op); - auto maybeSrcBufferType = bufferization::getBufferType( - padOp.getSource(), options, invocationStack); + auto maybeSrcBufferType = + bufferization::detail::asMemRefType(bufferization::getBufferType( + padOp.getSource(), options, invocationStack)); if (failed(maybeSrcBufferType)) return failure(); MemRefLayoutAttrInterface layout; - return cast(MemRefType::get(padOp.getResultType().getShape(), - padOp.getResultType().getElementType(), layout, - maybeSrcBufferType->getMemorySpace())); + return cast( + MemRefType::get(padOp.getResultType().getShape(), + padOp.getResultType().getElementType(), layout, + maybeSrcBufferType->getMemorySpace())); } LogicalResult bufferize(Operation *op, RewriterBase &rewriter, @@ -892,7 +898,7 @@ struct ReshapeOpInterface srcType.getShape(), srcType.getElementType(), AffineMap(), cast(srcBuffer->getType()).getMemorySpace()); srcBuffer = rewriter - .create( + .create( op->getLoc(), memrefType, *tensorAlloc) .getResult(); } @@ -902,7 +908,7 @@ struct ReshapeOpInterface return success(); } - FailureOr + FailureOr getBufferType(Operation *op, Value value, const BufferizationOptions &options, SmallVector &invocationStack) const { auto reshapeOp = cast(op); @@ -911,9 +917,9 @@ struct ReshapeOpInterface reshapeOp.getSource(), options, invocationStack); if (failed(maybeSourceBufferType)) return failure(); - return getMemRefTypeWithStaticIdentityLayout( + return cast(getMemRefTypeWithStaticIdentityLayout( reshapeOp.getResult().getType(), - cast(maybeSourceBufferType.value()).getMemorySpace()); + cast(maybeSourceBufferType.value()).getMemorySpace())); } }; diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp index f8ac5b748302..44b267b24d8e 100644 --- a/mlir/lib/IR/BuiltinTypes.cpp +++ b/mlir/lib/IR/BuiltinTypes.cpp @@ -210,6 +210,10 @@ LogicalResult OpaqueType::verify(function_ref emitError, // VectorType //===----------------------------------------------------------------------===// +bool VectorType::isValidElementType(Type t) { + return isValidVectorTypeElementType(t); +} + LogicalResult VectorType::verify(function_ref emitError, ArrayRef shape, Type elementType, ArrayRef scalableDims) { @@ -248,6 +252,49 @@ VectorType VectorType::cloneWith(std::optional> shape, getScalableDims()); } +//===----------------------------------------------------------------------===// +// TensorType +//===----------------------------------------------------------------------===// + +Type TensorType::getElementType() const { + return llvm::TypeSwitch(*this) + .Case( + [](auto type) { return type.getElementType(); }); +} + +bool TensorType::hasRank() const { + return !llvm::isa(*this); +} + +ArrayRef TensorType::getShape() const { + return llvm::cast(*this).getShape(); +} + +TensorType TensorType::cloneWith(std::optional> shape, + Type elementType) const { + if (llvm::dyn_cast(*this)) { + if (shape) + return RankedTensorType::get(*shape, elementType); + return UnrankedTensorType::get(elementType); + } + + auto rankedTy = llvm::cast(*this); + if (!shape) + return RankedTensorType::get(rankedTy.getShape(), elementType, + rankedTy.getEncoding()); + return RankedTensorType::get(shape.value_or(rankedTy.getShape()), elementType, + rankedTy.getEncoding()); +} + +RankedTensorType TensorType::clone(::llvm::ArrayRef shape, + Type elementType) const { + return ::llvm::cast(cloneWith(shape, elementType)); +} + +RankedTensorType TensorType::clone(::llvm::ArrayRef shape) const { + return ::llvm::cast(cloneWith(shape, getElementType())); +} + // Check if 
"elementType" can be an element type of a tensor. static LogicalResult checkTensorElementType(function_ref emitError, @@ -271,12 +318,6 @@ bool TensorType::isValidElementType(Type type) { // RankedTensorType //===----------------------------------------------------------------------===// -RankedTensorType RankedTensorType::cloneWith(std::optional> shape, - Type elementType) const { - return RankedTensorType::get(shape.value_or(getShape()), elementType, - getEncoding()); -} - LogicalResult RankedTensorType::verify(function_ref emitError, ArrayRef shape, Type elementType, @@ -294,14 +335,6 @@ RankedTensorType::verify(function_ref emitError, // UnrankedTensorType //===----------------------------------------------------------------------===// -TensorType UnrankedTensorType::cloneWith(std::optional> shape, - Type elementType) const { - if (shape) - return RankedTensorType::get(*shape, elementType); - - return UnrankedTensorType::get(elementType); -} - LogicalResult UnrankedTensorType::verify(function_ref emitError, Type elementType) { @@ -309,9 +342,55 @@ UnrankedTensorType::verify(function_ref emitError, } //===----------------------------------------------------------------------===// -// BaseMemRefTypeInterface +// BaseMemRefType //===----------------------------------------------------------------------===// +Type BaseMemRefType::getElementType() const { + return llvm::TypeSwitch(*this) + .Case( + [](auto type) { return type.getElementType(); }); +} + +bool BaseMemRefType::hasRank() const { + return !llvm::isa(*this); +} + +ArrayRef BaseMemRefType::getShape() const { + return llvm::cast(*this).getShape(); +} + +BaseMemRefType BaseMemRefType::cloneWith(std::optional> shape, + Type elementType) const { + if (llvm::dyn_cast(*this)) { + if (!shape) + return UnrankedMemRefType::get(elementType, getMemorySpace()); + MemRefType::Builder builder(*shape, elementType); + builder.setMemorySpace(getMemorySpace()); + return builder; + } + + MemRefType::Builder builder(llvm::cast(*this)); + if (shape) + builder.setShape(*shape); + builder.setElementType(elementType); + return builder; +} + +MemRefType BaseMemRefType::clone(::llvm::ArrayRef shape, + Type elementType) const { + return ::llvm::cast(cloneWith(shape, elementType)); +} + +MemRefType BaseMemRefType::clone(::llvm::ArrayRef shape) const { + return ::llvm::cast(cloneWith(shape, getElementType())); +} + +Attribute BaseMemRefType::getMemorySpace() const { + if (auto rankedMemRefTy = llvm::dyn_cast(*this)) + return rankedMemRefTy.getMemorySpace(); + return llvm::cast(*this).getMemorySpace(); +} + unsigned BaseMemRefType::getMemorySpaceAsInt() const { if (auto rankedMemRefTy = llvm::dyn_cast(*this)) return rankedMemRefTy.getMemorySpaceAsInt(); @@ -431,15 +510,6 @@ unsigned mlir::detail::getMemorySpaceAsInt(Attribute memorySpace) { return static_cast(llvm::cast(memorySpace).getInt()); } -MemRefType MemRefType::cloneWith(std::optional> shape, - Type elementType) const { - MemRefType::Builder builder(llvm::cast(*this)); - if (shape) - builder.setShape(*shape); - builder.setElementType(elementType); - return builder; -} - unsigned MemRefType::getMemorySpaceAsInt() const { return detail::getMemorySpaceAsInt(getMemorySpace()); } @@ -764,7 +834,7 @@ static LogicalResult getStridesAndOffset(MemRefType t, } LogicalResult MemRefType::getStridesAndOffset(SmallVectorImpl &strides, - int64_t &offset) { + int64_t &offset) { // Happy path: the type uses the strided layout directly. 
if (auto strided = llvm::dyn_cast(getLayout())) { llvm::append_range(strides, strided.getStrides()); @@ -818,15 +888,6 @@ bool MemRefType::isLastDimUnitStride() { // UnrankedMemRefType //===----------------------------------------------------------------------===// -BaseMemRefType UnrankedMemRefType::cloneWith(std::optional> shape, - Type elementType) const { - if (!shape) - return UnrankedMemRefType::get(elementType, getMemorySpace()); - MemRefType::Builder builder(*shape, elementType); - builder.setMemorySpace(getMemorySpace()); - return (MemRefType)builder; -} - unsigned UnrankedMemRefType::getMemorySpaceAsInt() const { return detail::getMemorySpaceAsInt(getMemorySpace()); } diff --git a/mlir/test/Conversion/MeshToMPI/convert-mesh-to-mpi.mlir b/mlir/test/Conversion/MeshToMPI/convert-mesh-to-mpi.mlir index c1aef97438bd..34a4e0e83410 100644 --- a/mlir/test/Conversion/MeshToMPI/convert-mesh-to-mpi.mlir +++ b/mlir/test/Conversion/MeshToMPI/convert-mesh-to-mpi.mlir @@ -164,7 +164,7 @@ func.func @update_halo_3d_tensor( // CHECK-NEXT: [[vc44_i32:%.*]] = arith.constant 44 : i32 // CHECK-NEXT: [[vc4_i32:%.*]] = arith.constant 4 : i32 // CHECK-NEXT: [[vc91_i32:%.*]] = arith.constant 91 : i32 - // CHECK-NEXT: [[v0:%.*]] = bufferization.to_memref [[varg0]] : tensor<120x120x120xi8> to memref<120x120x120xi8> + // CHECK-NEXT: [[v0:%.*]] = bufferization.to_buffer [[varg0]] : tensor<120x120x120xi8> to memref<120x120x120xi8> // CHECK-NEXT: [[valloc:%.*]] = memref.alloc() : memref<117x113x5xi8> // CHECK-NEXT: [[vsubview:%.*]] = memref.subview [[v0]][1, 3, 109] [117, 113, 5] [1, 1, 1] : memref<120x120x120xi8> to memref<117x113x5xi8, strided<[14400, 120, 1], offset: 14869>> // CHECK-NEXT: memref.copy [[vsubview]], [[valloc]] : memref<117x113x5xi8, strided<[14400, 120, 1], offset: 14869>> to memref<117x113x5xi8> diff --git a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor-invalid.mlir b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor-invalid.mlir index bf5e65f1b3f7..36eb4d4669b0 100644 --- a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor-invalid.mlir +++ b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor-invalid.mlir @@ -1,4 +1,3 @@ -// REQUIRES: tosa-to-tensor-enabled // RUN: mlir-opt --split-input-file -pass-pipeline="builtin.module(func.func(tosa-to-tensor))" %s -verify-diagnostics // CHECK-LABEL: @slice_resultType_unranked diff --git a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir index 7be8f417ee2f..27018fb79f60 100644 --- a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir +++ b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir @@ -1,4 +1,3 @@ -// REQUIRES: tosa-to-tensor-enabled // RUN: mlir-opt --split-input-file --tosa-to-tensor %s -o -| FileCheck %s // ----- diff --git a/mlir/test/Conversion/VectorToXeGPU/load-to-xegpu.mlir b/mlir/test/Conversion/VectorToXeGPU/load-to-xegpu.mlir index 682ab6ec086f..7cef17df79dd 100644 --- a/mlir/test/Conversion/VectorToXeGPU/load-to-xegpu.mlir +++ b/mlir/test/Conversion/VectorToXeGPU/load-to-xegpu.mlir @@ -1,4 +1,3 @@ -// REQUIRES: xegpu-dialect-enabled // RUN: mlir-opt %s -convert-vector-to-xegpu -split-input-file | FileCheck %s func.func @load_1D_vector(%source: memref<8x16x32xf32>, %offset: index) -> vector<8xf32> { diff --git a/mlir/test/Conversion/VectorToXeGPU/store-to-xegpu.mlir b/mlir/test/Conversion/VectorToXeGPU/store-to-xegpu.mlir index 404c612e48df..4f069ebc39db 100644 --- a/mlir/test/Conversion/VectorToXeGPU/store-to-xegpu.mlir +++ 
b/mlir/test/Conversion/VectorToXeGPU/store-to-xegpu.mlir @@ -1,4 +1,3 @@ -// REQUIRES: xegpu-dialect-enabled // RUN: mlir-opt %s -convert-vector-to-xegpu -split-input-file | FileCheck %s func.func @store_1D_vector(%vec: vector<8xf32>, diff --git a/mlir/test/Conversion/VectorToXeGPU/transfer-read-to-xegpu.mlir b/mlir/test/Conversion/VectorToXeGPU/transfer-read-to-xegpu.mlir index 284c06c3b401..497eb86cea83 100644 --- a/mlir/test/Conversion/VectorToXeGPU/transfer-read-to-xegpu.mlir +++ b/mlir/test/Conversion/VectorToXeGPU/transfer-read-to-xegpu.mlir @@ -1,4 +1,3 @@ -// REQUIRES: xegpu-dialect-enabled // RUN: mlir-opt %s -convert-vector-to-xegpu -split-input-file | FileCheck %s func.func @load_1D_vector(%source: memref<8x16x32xf32>, %offset: index) -> vector<8xf32> { diff --git a/mlir/test/Conversion/VectorToXeGPU/transfer-write-to-xegpu.mlir b/mlir/test/Conversion/VectorToXeGPU/transfer-write-to-xegpu.mlir index 6cfb19b4142d..91e3fb3841f6 100644 --- a/mlir/test/Conversion/VectorToXeGPU/transfer-write-to-xegpu.mlir +++ b/mlir/test/Conversion/VectorToXeGPU/transfer-write-to-xegpu.mlir @@ -1,4 +1,3 @@ -// REQUIRES: xegpu-dialect-enabled // RUN: mlir-opt %s -convert-vector-to-xegpu -split-input-file | FileCheck %s func.func @store_1D_vector(%vec: vector<8xf32>, diff --git a/mlir/test/Dialect/Affine/loop-fusion-4.mlir b/mlir/test/Dialect/Affine/loop-fusion-4.mlir index ea144f73bb21..96088fe5deba 100644 --- a/mlir/test/Dialect/Affine/loop-fusion-4.mlir +++ b/mlir/test/Dialect/Affine/loop-fusion-4.mlir @@ -242,7 +242,7 @@ module { ^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index): tensor.yield %cst_f32 : f32 } : tensor<1x32x32x8xf32> to tensor<1x40x8229x8xf32> - %1 = bufferization.to_memref %padded : tensor<1x40x8229x8xf32> to memref<1x40x8229x8xf32> + %1 = bufferization.to_buffer %padded : tensor<1x40x8229x8xf32> to memref<1x40x8229x8xf32> %alloc_0 = memref.alloc() {alignment = 64 : i64} : memref<1x32x32x8xf32> affine.for %arg1 = 0 to 1 { affine.for %arg2 = 0 to 32 { diff --git a/mlir/test/Dialect/Arith/bufferize.mlir b/mlir/test/Dialect/Arith/bufferize.mlir index 0b7838e1471d..d9d0cde642be 100644 --- a/mlir/test/Dialect/Arith/bufferize.mlir +++ b/mlir/test/Dialect/Arith/bufferize.mlir @@ -7,7 +7,7 @@ func.func @index_cast(%tensor: tensor, %scalar: i32) -> (tensor, ind %index_scalar = arith.index_cast %scalar : i32 to index return %index_tensor, %index_scalar : tensor, index } -// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor +// CHECK: %[[MEMREF:.*]] = bufferization.to_buffer %[[TENSOR]] : tensor // CHECK-NEXT: %[[INDEX_MEMREF:.*]] = arith.index_cast %[[MEMREF]] // CHECK-SAME: memref to memref // CHECK-NEXT: %[[INDEX_TENSOR:.*]] = bufferization.to_tensor %[[INDEX_MEMREF]] @@ -83,8 +83,8 @@ func.func @non_tensor() { // CHECK-SAME: %[[PRED:.*]]: i1, // CHECK-SAME: %[[TRUE_VAL:.*]]: tensor, // CHECK-SAME: %[[FALSE_VAL:.*]]: tensor) -> tensor { -// CHECK-DAG: %[[TRUE_VAL_MEMREF:.*]] = bufferization.to_memref %[[TRUE_VAL]] : tensor -// CHECK-DAG: %[[FALSE_VAL_MEMREF:.*]] = bufferization.to_memref %[[FALSE_VAL]] : tensor +// CHECK-DAG: %[[TRUE_VAL_MEMREF:.*]] = bufferization.to_buffer %[[TRUE_VAL]] : tensor +// CHECK-DAG: %[[FALSE_VAL_MEMREF:.*]] = bufferization.to_buffer %[[FALSE_VAL]] : tensor // CHECK: %[[RET_MEMREF:.*]] = arith.select %[[PRED]], %[[TRUE_VAL_MEMREF]], %[[FALSE_VAL_MEMREF]] : memref // CHECK: %[[RET:.*]] = bufferization.to_tensor %[[RET_MEMREF]] : memref // CHECK: return %[[RET]] : tensor diff --git 
a/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-other.mlir b/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-other.mlir index 5d0657eb38ba..2204c6fae50d 100644 --- a/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-other.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-other.mlir @@ -5,11 +5,11 @@ // no memref operands. // CHECK-LABEL: func private @no_interface_no_operands( -// CHECK-NEXT: %[[m:.*]] = bufferization.to_memref +// CHECK-NEXT: %[[m:.*]] = bufferization.to_buffer // CHECK-NEXT: %[[clone:.*]] = bufferization.clone %[[m]] // CHECK-NEXT: return %[[clone]] func.func private @no_interface_no_operands(%t : tensor) -> memref { - %0 = bufferization.to_memref %t : tensor to memref + %0 = bufferization.to_buffer %t : tensor to memref return %0 : memref } diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir index 8f0170b17381..4c7683ec211e 100644 --- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir @@ -8,7 +8,7 @@ // CHECK-LABEL: func @buffer_not_deallocated( // CHECK-SAME: %[[t:.*]]: tensor func.func @buffer_not_deallocated(%t : tensor, %c : i1) -> tensor { - // CHECK: %[[m:.*]] = bufferization.to_memref %[[t]] + // CHECK: %[[m:.*]] = bufferization.to_buffer %[[t]] // CHECK: %[[r:.*]] = scf.if %{{.*}} { %r = scf.if %c -> tensor { // CHECK: %[[some_op:.*]] = "test.some_op" @@ -37,7 +37,7 @@ func.func @write_to_alloc_tensor_or_readonly_tensor(%arg0: tensor, %cond: i1, %val: i32) -> tensor { - // CHECK: %[[arg0_m:.*]] = bufferization.to_memref %[[arg0]] + // CHECK: %[[arg0_m:.*]] = bufferization.to_buffer %[[arg0]] // CHECK: %[[r:.*]] = scf.if {{.*}} { // CHECK: scf.yield %[[arg0_m]] // CHECK: } else { diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-analysis.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-analysis.mlir index 7d429e484011..454c17aef4d8 100644 --- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-analysis.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-analysis.mlir @@ -87,32 +87,32 @@ func.func @read_of_alloc_tensor_is_not_a_conflict(%f: f32, %idx: index) -> f32 { // ----- -// CHECK-LABEL: func @to_memref_not_read_only( -func.func @to_memref_not_read_only(%idx : index, %f: f32) -> f32 { +// CHECK-LABEL: func @to_buffer_not_read_only( +func.func @to_buffer_not_read_only(%idx : index, %f: f32) -> f32 { %t = tensor.generate { ^bb0(%i : index): tensor.yield %f : f32 } : tensor<5xf32> - // Some op may write into the result of to_memref later. - // CHECK: bufferization.to_memref + // Some op may write into the result of to_buffer later. 
+ // CHECK: bufferization.to_buffer // CHECK-SAME: {__inplace_operands_attr__ = ["false"]} - %m = bufferization.to_memref %t : tensor<5xf32> to memref<5xf32> + %m = bufferization.to_buffer %t : tensor<5xf32> to memref<5xf32> %2 = tensor.extract %t[%idx] : tensor<5xf32> return %2 : f32 } // ----- -// CHECK-LABEL: func @to_memref_read_only( -func.func @to_memref_read_only(%idx : index, %f: f32) -> f32 { +// CHECK-LABEL: func @to_buffer_read_only( +func.func @to_buffer_read_only(%idx : index, %f: f32) -> f32 { %t = tensor.generate { ^bb0(%i : index): tensor.yield %f : f32 } : tensor<5xf32> - // Some op may write into the result of to_memref later. - // CHECK: bufferization.to_memref + // Some op may write into the result of to_buffer later. + // CHECK: bufferization.to_buffer // CHECK-SAME: {__inplace_operands_attr__ = ["true"]} - %m = bufferization.to_memref %t {read_only} : tensor<5xf32> to memref<5xf32> + %m = bufferization.to_buffer %t {read_only} : tensor<5xf32> to memref<5xf32> %2 = tensor.extract %t[%idx] : tensor<5xf32> return %2 : f32 } diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-encodings.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-encodings.mlir index c26f1681e4d9..e97777c3e3d1 100644 --- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-encodings.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-encodings.mlir @@ -47,7 +47,7 @@ func.func @alloc_tesor_copy_from_default_space(%arg0: tensor<128xf32>) -> tensor // CHECK-LABEL: @alloc_tesor_copy_from_default_space // CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32>) -> tensor<128xf32> { -// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32> to memref<128xf32, strided<[?], offset: ?>> +// CHECK: %[[v0:.+]] = bufferization.to_buffer %[[arg0]] : tensor<128xf32> to memref<128xf32, strided<[?], offset: ?>> // CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 1> // CHECK: memref.copy %[[v0]], %[[alloc]] : memref<128xf32, strided<[?], offset: ?>> to memref<128xf32, 1> // CHECK: %[[v1:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 1> to tensor<128xf32> @@ -63,7 +63,7 @@ func.func @alloc_tesor_copy_from_non_default_space(%arg0: tensor<128xf32, 1>) -> // CHECK-LABEL: @alloc_tesor_copy_from_non_default_space // CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32, 1 : i64>) -> tensor<128xf32, 2 : i64> { -// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> +// CHECK: %[[v0:.+]] = bufferization.to_buffer %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> // CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 2> // CHECK: memref.copy %[[v0]], %[[alloc]] : memref<128xf32, strided<[?], offset: ?>, 1> to memref<128xf32, 2> // CHECK: %[[v1:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 2> to tensor<128xf32, 2 : i64> @@ -82,9 +82,9 @@ func.func @alloc_tesor_copy_from_non_default_space_no_cast(%arg0: tensor<128xf32 // CHECK-LABEL: @alloc_tesor_copy_from_non_default_space_no_cast // CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32, 1 : i64>, %[[arg1:.+]]: tensor<4xf32, 1 : i64>) -> tensor<128xf32, 1 : i64> { -// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg1]] : tensor<4xf32, 1 : i64> to memref<4xf32, strided<[?], offset: ?>, 1> -// CHECK: %[[v1:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> -// CHECK: 
%[[v2:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> +// CHECK: %[[v0:.+]] = bufferization.to_buffer %[[arg1]] : tensor<4xf32, 1 : i64> to memref<4xf32, strided<[?], offset: ?>, 1> +// CHECK: %[[v1:.+]] = bufferization.to_buffer %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> +// CHECK: %[[v2:.+]] = bufferization.to_buffer %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> // CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 2> // CHECK: memref.copy %[[v2]], %[[alloc]] : memref<128xf32, strided<[?], offset: ?>, 1> to memref<128xf32, 2> // CHECK: %[[v3:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 2> to tensor<128xf32, 1 : i64> @@ -104,7 +104,7 @@ func.func @materialize_in_destination(%arg0: tensor<128xf32, 1>) -> tensor<128xf // CHECK-LABEL: @materialize_in_destination // CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32, 1 : i64>) -> tensor<128xf32, 2 : i64> { -// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> +// CHECK: %[[v0:.+]] = bufferization.to_buffer %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> // CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 2> // CHECK: memref.copy %[[v0]], %[[alloc]] : memref<128xf32, strided<[?], offset: ?>, 1> to memref<128xf32, 2> // CHECK: %[[v1:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 2> to tensor<128xf32, 2 : i64> diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir index 194c3278c78a..908c760d9a0c 100644 --- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir @@ -25,9 +25,9 @@ func.func @use_of_unknown_op_1(%t1: tensor) %idx = arith.constant 0 : index %cst = arith.constant 0.0 : f32 - // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] : tensor to memref> + // CHECK: %[[dummy_memref:.*]] = bufferization.to_buffer %[[dummy]] : tensor to memref> // CHECK: vector.transfer_read %[[dummy_memref]][%{{.*}}], %{{.*}} : memref> - // CHECK-NO-LAYOUT-MAP: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] : tensor to memref + // CHECK-NO-LAYOUT-MAP: %[[dummy_memref:.*]] = bufferization.to_buffer %[[dummy]] : tensor to memref // CHECK-NO-LAYOUT-MAP: vector.transfer_read %[[dummy_memref]][%{{.*}}], %{{.*}} : memref %1 = vector.transfer_read %0[%idx], %cst : tensor, vector<5xf32> return %1 : vector<5xf32> @@ -55,13 +55,13 @@ func.func @use_of_unknown_op_3(%t1: tensor) -> (vector<5xf32>, vector<5xf32>) { %idx = arith.constant 0 : index %cst = arith.constant 0.0 : f32 - // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] + // CHECK: %[[m1:.*]] = bufferization.to_buffer %[[t1]] // CHECK: %[[v1:.*]] = vector.transfer_read %[[m1]] %1 = vector.transfer_read %t1[%idx], %cst : tensor, vector<5xf32> // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[t1]]) %0 = "test.dummy_op"(%t1) : (tensor) -> tensor - // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] : tensor to memref> + // CHECK: %[[dummy_memref:.*]] = bufferization.to_buffer %[[dummy]] : tensor to memref> // CHECK: %[[v2:.*]] = vector.transfer_read %[[dummy_memref]] %2 = vector.transfer_read %0[%idx], %cst : tensor, vector<5xf32> @@ 
-81,7 +81,7 @@ func.func @use_of_unknown_op_4(%t1: tensor) // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[t1]]) %0 = "test.dummy_op"(%t1) : (tensor) -> tensor - // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] + // CHECK: %[[dummy_memref:.*]] = bufferization.to_buffer %[[dummy]] // CHECK: %[[v1:.*]] = vector.transfer_read %[[dummy_memref]] %1 = vector.transfer_read %0[%idx], %cst : tensor, vector<5xf32> @@ -98,7 +98,7 @@ func.func @use_of_unknown_op_4(%t1: tensor) // CHECK-SAME: %[[t1:.*]]: tensor func.func @use_of_bufferizable_op_in_unbufferizable_op( %t1: tensor, %o: index, %s: index) -> (tensor, tensor) { - // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] + // CHECK: %[[m1:.*]] = bufferization.to_buffer %[[t1]] // CHECK: %[[subview:.*]] = memref.subview %[[m1]] // The op must alloc because "test.dummy" may bufferize to a memory write. // CHECK: %[[alloc:.*]] = memref.alloc @@ -119,7 +119,7 @@ func.func @unused_unknown_op(%t1 : tensor) -> vector<5xf32> { %idx = arith.constant 0 : index %cst = arith.constant 0.0 : f32 - // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] + // CHECK: %[[m1:.*]] = bufferization.to_buffer %[[t1]] // CHECK: vector.transfer_read %[[m1]] %1 = vector.transfer_read %t1[%idx], %cst : tensor, vector<5xf32> @@ -166,7 +166,7 @@ func.func @unknown_op_may_read(%v: vector<5xf32>) func.func @unknown_op_not_writable( %t1 : tensor, %v : vector<5xf32>, %idx : index) -> tensor { // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[t1]]) - // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] + // CHECK: %[[dummy_memref:.*]] = bufferization.to_buffer %[[dummy]] %0 = "test.dummy_op"(%t1) : (tensor) -> (tensor) // The result of an unknown op is not writable. Always generate a copy. @@ -186,7 +186,7 @@ func.func @unknown_op_not_writable( // CHECK-TENSOR-LABEL: func @simple_tensor_test( // CHECK-TENSOR-SAME: %[[t1:.*]]: tensor func.func @simple_tensor_test(%t1 : tensor, %f : f32) -> tensor { - // CHECK-TENSOR: %[[t1_memref:.*]] = bufferization.to_memref %[[t1]] + // CHECK-TENSOR: %[[t1_memref:.*]] = bufferization.to_buffer %[[t1]] %c0 = arith.constant 0 : index // CHECK-TENSOR: %[[alloc:.*]] = memref.alloc // CHECK-TENSOR: memref.copy %[[t1_memref]], %[[alloc]] @@ -203,7 +203,7 @@ func.func @simple_tensor_test(%t1 : tensor, %f : f32) -> tensor { // CHECK-SCF-SAME: %[[t1:.*]]: tensor {bufferization.writable = true}, %[[c:.*]]: i1, %[[pos:.*]]: index func.func @simple_scf_if(%t1: tensor {bufferization.writable = true}, %c: i1, %pos: index, %f: f32) -> (tensor, index) { - // CHECK-SCF: %[[t1_memref:.*]] = bufferization.to_memref %[[t1]] + // CHECK-SCF: %[[t1_memref:.*]] = bufferization.to_buffer %[[t1]] // CHECK-SCF: %[[r:.*]] = scf.if %[[c]] -> (memref) { %r1, %r2 = scf.if %c -> (tensor, index) { // CHECK-SCF: scf.yield %[[t1_memref]] @@ -211,7 +211,7 @@ func.func @simple_scf_if(%t1: tensor {bufferization.writable = true}, %c: // CHECK-SCF: } else { } else { // CHECK-SCF: %[[insert:.*]] = tensor.insert %{{.*}} into %[[t1]][{{.*}}] - // CHECK-SCF: %[[insert_memref:.*]] = bufferization.to_memref %[[insert]] + // CHECK-SCF: %[[insert_memref:.*]] = bufferization.to_buffer %[[insert]] %1 = tensor.insert %f into %t1[%pos] : tensor // CHECK-SCF: scf.yield %[[insert_memref]] scf.yield %1, %pos : tensor, index diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir index e65c5b92949f..803173201183 100644 --- 
a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir @@ -39,7 +39,7 @@ func.func @use_tensor_func_arg(%A : tensor) -> (vector<4xf32>) { %c0 = arith.constant 0 : index %f0 = arith.constant 0.0 : f32 - // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]] + // CHECK: %[[A_memref:.*]] = bufferization.to_buffer %[[A]] // CHECK: %[[res:.*]] = vector.transfer_read %[[A_memref]] %0 = vector.transfer_read %A[%c0], %f0 : tensor, vector<4xf32> @@ -54,7 +54,7 @@ func.func @use_tensor_func_arg(%A : tensor) -> (vector<4xf32>) { func.func @return_tensor(%A : tensor, %v : vector<4xf32>) -> (tensor) { %c0 = arith.constant 0 : index - // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]] + // CHECK: %[[A_memref:.*]] = bufferization.to_buffer %[[A]] // CHECK: %[[dim:.*]] = memref.dim %[[A_memref]] // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]]) // CHECK: memref.copy %[[A_memref]], %[[alloc]] @@ -102,7 +102,7 @@ func.func @read_after_write_conflict(%cst : f32, %idx : index, %idx2 : index) -> (f32, f32) { // CHECK-DAG: %[[alloc:.*]] = memref.alloc // CHECK-DAG: %[[dummy:.*]] = "test.dummy_op" - // CHECK-DAG: %[[dummy_m:.*]] = bufferization.to_memref %[[dummy]] + // CHECK-DAG: %[[dummy_m:.*]] = bufferization.to_buffer %[[dummy]] %t = "test.dummy_op"() : () -> (tensor<10xf32>) // CHECK: memref.copy %[[dummy_m]], %[[alloc]] @@ -134,7 +134,7 @@ func.func @copy_deallocated() -> tensor<10xf32> { // CHECK-LABEL: func @select_different_tensors( // CHECK-SAME: %[[t:.*]]: tensor func.func @select_different_tensors(%t: tensor, %sz: index, %pos: index, %c: i1) -> f32 { - // CHECK-DAG: %[[m:.*]] = bufferization.to_memref %[[t]] : tensor to memref + // CHECK-DAG: %[[m:.*]] = bufferization.to_buffer %[[t]] : tensor to memref // CHECK-DAG: %[[alloc:.*]] = memref.alloc(%{{.*}}) {{.*}} : memref %0 = bufferization.alloc_tensor(%sz) : tensor @@ -154,7 +154,7 @@ func.func @select_different_tensors(%t: tensor, %sz: index, %pos: index, // moment because this would create a tensor op during bufferization. That is // currently forbidden. 
func.func @alloc_tensor_with_copy(%t: tensor<5xf32>) -> tensor<5xf32> { - // CHECK: %[[m:.*]] = bufferization.to_memref %[[t]] + // CHECK: %[[m:.*]] = bufferization.to_buffer %[[t]] // CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32> // CHECK: memref.copy %[[m]], %[[alloc]] %0 = bufferization.alloc_tensor() copy(%t) : tensor<5xf32> @@ -200,7 +200,7 @@ func.func @read_of_alias(%t: tensor<100xf32>, %pos1: index, %pos2: index, // CHECK-LABEL: func @from_unranked_to_unranked( // CHECK-SAME: %[[arg0:.*]]: tensor<*xi32> func.func @from_unranked_to_unranked(%arg0: tensor<*xi32>) -> tensor<*xi32> { - // CHECK: %[[m:.*]] = bufferization.to_memref %[[arg0]] : tensor<*xi32> to memref<*xi32> + // CHECK: %[[m:.*]] = bufferization.to_buffer %[[arg0]] : tensor<*xi32> to memref<*xi32> // CHECK: %[[t:.*]] = bufferization.to_tensor %[[m]] // CHECK: return %[[t]] : tensor<*xi32> %0 = tensor.cast %arg0 : tensor<*xi32> to tensor<*xi32> @@ -212,7 +212,7 @@ func.func @from_unranked_to_unranked(%arg0: tensor<*xi32>) -> tensor<*xi32> { // CHECK-LABEL: func @tensor_copy( // CHECK-SAME: %[[arg0:.*]]: tensor<5xf32>) func.func @tensor_copy(%arg0: tensor<5xf32>) -> tensor<5xf32> { - // CHECK: %[[m:.*]] = bufferization.to_memref %[[arg0]] + // CHECK: %[[m:.*]] = bufferization.to_buffer %[[arg0]] // CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32> // CHECK: memref.copy %[[m]], %[[alloc]] // CHECK: %[[r:.*]] = bufferization.to_tensor %[[alloc]] @@ -227,7 +227,7 @@ func.func @tensor_copy(%arg0: tensor<5xf32>) -> tensor<5xf32> { // CHECK-LABEL: func @materialize_in_destination_buffer( // CHECK-SAME: %[[t:.*]]: tensor<5xf32>, %[[m:.*]]: memref<5xf32>) -// CHECK: %[[b:.*]] = bufferization.to_memref %[[t]] : tensor<5xf32> to memref<5xf32, strided<[?], offset: ?>> +// CHECK: %[[b:.*]] = bufferization.to_buffer %[[t]] : tensor<5xf32> to memref<5xf32, strided<[?], offset: ?>> // CHECK: memref.copy %[[b]], %[[m]] func.func @materialize_in_destination_buffer(%t: tensor<5xf32>, %m: memref<5xf32>) { bufferization.materialize_in_destination %t in restrict writable %m @@ -268,4 +268,42 @@ func.func @materialize_in_dest_raw(%f: f32, %f2: f32, %idx: index) -> (tensor<5x %r = tensor.extract %dest_filled[%idx] : tensor<5xf32> return %0, %r : tensor<5xf32>, f32 -} \ No newline at end of file +} + +// ----- + +// CHECK: func.func @custom_op( +// CHECK-SAME: %[[ARG:.*]]: !test.test_tensor<[32, 64], f64> +// CHECK-SAME: ) -> !test.test_tensor<[32, 128], f64> { +func.func @custom_op(%arg: !test.test_tensor<[32, 64], f64>) + -> !test.test_tensor<[32, 128], f64> { + // CHECK: %[[MEMREF:.*]] = bufferization.to_buffer %[[ARG]] + // CHECK: %[[DUMMY:.*]] = "test.dummy_memref_op"(%[[MEMREF]]) + // CHECK-SAME: : (!test.test_memref<[32, 64], f64>) + // CHECK-SAME: -> !test.test_memref<[32, 128], f64> + // CHECK: %[[OUT:.*]] = bufferization.to_tensor %[[DUMMY]] + %out = "test.dummy_tensor_op"(%arg) : (!test.test_tensor<[32, 64], f64>) + -> !test.test_tensor<[32, 128], f64> + + // CHECK: return %[[OUT]] + return %out : !test.test_tensor<[32, 128], f64> +} + +// ----- + +// CHECK: func.func @custom_origin_op() +// CHECK-SAME: -> !test.test_tensor<[42], f64> { +func.func @custom_origin_op() -> !test.test_tensor<[42], f64> { + // CHECK: %[[MEMREF:.*]] = "test.create_memref_op"() : () + // CHECK-SAME: -> !test.test_memref<[21], f64> + // CHECK: %[[DUMMY:.*]] = "test.dummy_memref_op"(%[[MEMREF]]) + // CHECK-SAME: : (!test.test_memref<[21], f64>) + // CHECK-SAME: -> !test.test_memref<[42], f64> + %in = "test.create_tensor_op"() : () -> 
!test.test_tensor<[21], f64> + %out = "test.dummy_tensor_op"(%in) : (!test.test_tensor<[21], f64>) + -> !test.test_tensor<[42], f64> + + // CHECK: %[[OUT:.*]] = bufferization.to_tensor %[[DUMMY]] + // CHECK: return %[[OUT]] + return %out : !test.test_tensor<[42], f64> +} diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-force-copy-before-write.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-force-copy-before-write.mlir index 230a0ed42948..3fb89aa1a002 100644 --- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-force-copy-before-write.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-force-copy-before-write.mlir @@ -1,7 +1,7 @@ -// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 no-analysis-func-filter=contains_to_memref_op" -drop-equivalent-buffer-results --split-input-file | FileCheck %s +// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 no-analysis-func-filter=contains_to_buffer_op" -drop-equivalent-buffer-results --split-input-file | FileCheck %s // ToMemref ops do not pass analysis step. CopyBeforeWrite will be true only for the -// FuncOp "contains_to_memref_op" since it is specified in no-analysis-func-filter. +// FuncOp "contains_to_buffer_op" since it is specified in no-analysis-func-filter. // RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 copy-before-write=1" -drop-equivalent-buffer-results --split-input-file | FileCheck %s --check-prefix=CHECK_COPY @@ -21,14 +21,14 @@ module { return %inserted : tensor } - // CHECK-LABEL: func.func @contains_to_memref_op( + // CHECK-LABEL: func.func @contains_to_buffer_op( // CHECK: memref.copy - // CHECK_COPY-LABEL: func.func @contains_to_memref_op( + // CHECK_COPY-LABEL: func.func @contains_to_buffer_op( // CHECK_COPY: memref.copy - func.func @contains_to_memref_op(%arg0: tensor {bufferization.writable = true}, %arg1: index) -> vector<5xf32> { - %0 = bufferization.to_memref %arg0 : tensor to memref + func.func @contains_to_buffer_op(%arg0: tensor {bufferization.writable = true}, %arg1: index) -> vector<5xf32> { + %0 = bufferization.to_buffer %arg0 : tensor to memref %cst = arith.constant 0.000000e+00 : f32 %1 = vector.transfer_read %0[%arg1], %cst : memref, vector<5xf32> return %1 : vector<5xf32> diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir index ec2fb58ee03f..3817ae13a9cc 100644 --- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir @@ -70,7 +70,7 @@ func.func @call_to_unknown_tensor_returning_func(%t : tensor) { // CHECK-NO-LAYOUT-MAP: %[[alloc_no_layout:.*]] = memref.alloc(%{{.*}}) {{.*}} : memref<2x?xf32> // CHECK-NO-LAYOUT-MAP: memref.copy %[[subview]], %[[alloc_no_layout]] // TODO: %alloc should be deallocated here, but we currently do not dealloc -// buffers that are inserted due to to_tensor/to_memref canonicalization (when +// buffers that are inserted due to to_tensor/to_buffer canonicalization (when // the buffer types have different layout maps). 
// CHECK-NO-LAYOUT-MAP: return %[[alloc_no_layout]] @@ -669,17 +669,17 @@ func.func @call_llvm_func() { // ----- -// CHECK-LABEL: func @to_memref_op_unsupported( +// CHECK-LABEL: func @to_buffer_op_unsupported( // CHECK-SAME: %[[arg0:.*]]: memref {bufferization.writable = true}, %idx1: index, %idx2: index, %idx3: index, %v1: vector<5xf32>) -> (vector<5xf32>) { // Insert a copy because we cannot analyze what happens with the result of a - // to_memref op. + // to_buffer op. // CHECK: %[[alloc:.*]] = memref.alloc // CHECK: memref.copy %[[arg0]], %[[alloc]] - %0 = bufferization.to_memref %t1 : tensor to memref + %0 = bufferization.to_buffer %t1 : tensor to memref // CHECK: "test.foo"(%[[alloc]]) "test.foo"(%0) : (memref) -> () @@ -796,3 +796,58 @@ func.func @result_type_mismatch(%c: i1) -> tensor<5xf32> { return %1 : tensor<5xf32> } +// ----- + +// CHECK: func.func @custom_types( +// CHECK-SAME: %[[arg:.*]]: !test.test_memref<[4, 4], f64> +// CHECK-SAME: ) -> (!test.test_memref<[4, 8], f64>, +// CHECK-SAME: !test.test_memref<[4, 8], f64>) +func.func @custom_types(%arg: !test.test_tensor<[4, 4], f64>) + -> (!test.test_tensor<[4, 8], f64>, !test.test_tensor<[4, 8], f64>) { + // CHECK: %[[out1:.*]] = "test.dummy_memref_op"(%[[arg]]) : + // CHECK-SAME: (!test.test_memref<[4, 4], f64>) -> !test.test_memref<[4, 8], f64> + %out1 = "test.dummy_tensor_op"(%arg) : (!test.test_tensor<[4, 4], f64>) + -> !test.test_tensor<[4, 8], f64> + + // CHECK: %[[alloc:.*]] = "test.create_memref_op" + // CHECK: %[[out2:.*]] = "test.dummy_memref_op"(%[[alloc]]) + // CHECK-SAME: (!test.test_memref<[4, 4], f64>) -> !test.test_memref<[4, 8], f64> + %alloc = "test.create_tensor_op"() : () -> !test.test_tensor<[4, 4], f64> + %out2 = "test.dummy_tensor_op"(%alloc) : (!test.test_tensor<[4, 4], f64>) + -> !test.test_tensor<[4, 8], f64> + + // CHECK: return %[[out1]], %[[out2]] + return %out1, %out2 : + !test.test_tensor<[4, 8], f64>, !test.test_tensor<[4, 8], f64> +} + +// ----- + +// CHECK: func.func @custom_types_foo( +// CHECK-SAME: %[[arg:.*]]: !test.test_memref<[4, 4], f64> +// CHECK-SAME: ) -> !test.test_memref<[4, 4], f64> +func.func @custom_types_foo(%arg: !test.test_tensor<[4, 4], f64>) + -> !test.test_tensor<[4, 4], f64> { + // CHECK: %[[out:.*]] = "test.dummy_memref_op"(%[[arg]]) + %out = "test.dummy_tensor_op"(%arg) : (!test.test_tensor<[4, 4], f64>) + -> !test.test_tensor<[4, 4], f64> + // CHECK: return %[[out]] + return %out : !test.test_tensor<[4, 4], f64> +} + +// CHECK: func.func @custom_types_bar( +// CHECK-SAME: %[[arg:.*]]: !test.test_memref<[4, 4], f64> +// CHECK-SAME: ) -> !test.test_memref<[4, 8], f64> +func.func @custom_types_bar(%arg: !test.test_tensor<[4, 4], f64>) + -> !test.test_tensor<[4, 8], f64> { + // CHECK: %[[call:.*]] = call @custom_types_foo(%[[arg]]) + %call = func.call @custom_types_foo(%arg) : (!test.test_tensor<[4, 4], f64>) + -> !test.test_tensor<[4, 4], f64> + + // CHECK: %[[out:.*]] = "test.dummy_memref_op"(%[[call]]) + %out = "test.dummy_tensor_op"(%call) : (!test.test_tensor<[4, 4], f64>) + -> !test.test_tensor<[4, 8], f64> + + // CHECK: return %[[out]] + return %out : !test.test_tensor<[4, 8], f64> +} diff --git a/mlir/test/Dialect/Bufferization/Transforms/tensorlike-bufferlike.mlir b/mlir/test/Dialect/Bufferization/Transforms/tensorlike-bufferlike.mlir new file mode 100644 index 000000000000..d8b1a00522ab --- /dev/null +++ b/mlir/test/Dialect/Bufferization/Transforms/tensorlike-bufferlike.mlir @@ -0,0 +1,37 @@ +// RUN: mlir-opt %s -test-tensorlike-bufferlike -split-input-file | 
FileCheck %s + +// CHECK: func.func @builtin_unranked +// CHECK-SAME: {found = {operand_0 = "is_tensor_like", result_0 = "is_buffer_like"}} +func.func @builtin_unranked(%t: tensor<*xf32>) -> (memref<*xf32>) +{ + %0 = bufferization.to_buffer %t : tensor<*xf32> to memref<*xf32> + return %0 : memref<*xf32> +} + +// ----- + +// CHECK: func.func @builtin_ranked +// CHECK-SAME: {found = {operand_0 = "is_tensor_like", result_0 = "is_buffer_like"}} +func.func @builtin_ranked(%t: tensor<42xf32>) -> (memref<42xf32>) +{ + %0 = bufferization.to_buffer %t : tensor<42xf32> to memref<42xf32> + return %0 : memref<42xf32> +} + +// ----- + +// CHECK: func.func @custom_tensor +// CHECK-SAME: {found = {operand_0 = "is_tensor_like"}} +func.func @custom_tensor(%t: !test.test_tensor<[42], f32>) -> () +{ + return +} + +// ----- + +// CHECK: func.func @custom_memref +// CHECK-SAME: {found = {operand_0 = "is_buffer_like"}} +func.func @custom_memref(%t: !test.test_memref<[42], f32>) -> () +{ + return +} diff --git a/mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir b/mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir index 3c50a9e72d9d..2c0323c5a2ac 100644 --- a/mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir @@ -15,7 +15,7 @@ module attributes {transform.with_named_sequence} { func.func @test_function(%A : tensor, %v : vector<4xf32>) -> (tensor) { %c0 = arith.constant 0 : index - // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]] + // CHECK: %[[A_memref:.*]] = bufferization.to_buffer %[[A]] // CHECK: %[[dim:.*]] = memref.dim %[[A_memref]] // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]]) // CHECK: memref.copy %[[A_memref]], %[[alloc]] @@ -45,7 +45,7 @@ module attributes {transform.with_named_sequence} { func.func @test_function(%A : tensor, %v : vector<4xf32>) -> (tensor) { %c0 = arith.constant 0 : index - // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]] + // CHECK: %[[A_memref:.*]] = bufferization.to_buffer %[[A]] // CHECK: %[[dim:.*]] = memref.dim %[[A_memref]] // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]]) // CHECK: linalg.copy ins(%[[A_memref]] : memref<{{.*}}>) outs(%[[alloc]] @@ -117,7 +117,7 @@ module { func.func @test_function(%A : tensor, %v : vector<4xf32>) -> (tensor) { %c0 = arith.constant 0 : index - // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]] + // CHECK: %[[A_memref:.*]] = bufferization.to_buffer %[[A]] // CHECK: %[[dim:.*]] = memref.dim %[[A_memref]] // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]]) // CHECK: memref.copy %[[A_memref]], %[[alloc]] @@ -222,8 +222,8 @@ module attributes {transform.with_named_sequence} { transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) { %alloc_tensor = transform.structured.match ops{["bufferization.alloc_tensor"]} in %arg1 : (!transform.any_op) -> !transform.op<"bufferization.alloc_tensor"> - %2, %new = transform.structured.bufferize_to_allocation %alloc_tensor - {alloc_op = "memref.alloca"} + %2, %new = transform.structured.bufferize_to_allocation %alloc_tensor + {alloc_op = "memref.alloca"} : !transform.op<"bufferization.alloc_tensor"> transform.yield } diff --git a/mlir/test/Dialect/Bufferization/canonicalize.mlir b/mlir/test/Dialect/Bufferization/canonicalize.mlir index 3ebc1e4fa8de..db2a8ae65c8c 100644 --- a/mlir/test/Dialect/Bufferization/canonicalize.mlir +++ b/mlir/test/Dialect/Bufferization/canonicalize.mlir @@ -3,10 +3,10 @@ // RUN: --split-input-file -allow-unregistered-dialect | 
\ // RUN: FileCheck %s -// Basic folding of to_tensor(to_memref(t)) -> t +// Basic folding of to_tensor(to_buffer(t)) -> t // CHECK-LABEL: func @tensor_load_of_buffer_cast( func.func @tensor_load_of_buffer_cast(%arg0: tensor) -> tensor { - %0 = bufferization.to_memref %arg0 : tensor to memref + %0 = bufferization.to_buffer %arg0 : tensor to memref %1 = bufferization.to_tensor %0 : memref to tensor return %1 : tensor } @@ -15,11 +15,11 @@ func.func @tensor_load_of_buffer_cast(%arg0: tensor) -> tensor { // ----- -// Basic folding of to_memref(to_tensor(m)) -> m +// Basic folding of to_buffer(to_tensor(m)) -> m // CHECK-LABEL: func @buffer_cast_of_tensor_load( func.func @buffer_cast_of_tensor_load(%arg0: memref) -> memref { %0 = bufferization.to_tensor %arg0 : memref to tensor - %1 = bufferization.to_memref %0 : tensor to memref + %1 = bufferization.to_buffer %0 : tensor to memref return %1 : memref } // CHECK-SAME: %[[MEMREF:.*]]: memref) -> memref { @@ -35,13 +35,13 @@ func.func @buffer_cast_of_tensor_load(%arg0: memref) -> memref { // CHECK-SAME: -> memref { // CHECK: %[[TENSOR:.*]] = bufferization.to_tensor // CHECK-SAME: %[[MEMREF_ADDRSPACE2]] : memref to tensor -// CHECK: %[[MEMREF_ADDRSPACE7:.*]] = bufferization.to_memref +// CHECK: %[[MEMREF_ADDRSPACE7:.*]] = bufferization.to_buffer // CHECK-SAME: %[[TENSOR]] : tensor to memref // CHECK: return %[[MEMREF_ADDRSPACE7]] func.func @no_fold_buffer_cast_of_tensor_load(%arg0: memref) -> memref { %0 = bufferization.to_tensor %arg0 : memref to tensor - %1 = bufferization.to_memref %0 : tensor to memref + %1 = bufferization.to_buffer %0 : tensor to memref return %1 : memref } @@ -53,7 +53,7 @@ func.func @no_fold_buffer_cast_of_tensor_load(%arg0: memref) // CHECK-SAME: %[[M:.*]]: memref>) // CHECK-SAME: -> memref> { // CHECK-NOT: bufferization.to_tensor -// CHECK-NOT: bufferization.to_memref +// CHECK-NOT: bufferization.to_buffer // CHECK: %[[R:.*]] = memref.cast %[[M]] // CHECK-SAME: memref> to memref> // CHECK: return %[[R]] @@ -62,7 +62,7 @@ func.func @canonicalize_buffer_cast_of_tensor_load( -> memref> { %0 = bufferization.to_tensor %arg0 : memref> to tensor - %1 = bufferization.to_memref %0 : tensor to memref> + %1 = bufferization.to_buffer %0 : tensor to memref> return %1 : memref> } @@ -75,13 +75,13 @@ func.func @canonicalize_buffer_cast_of_tensor_load_to_copy( %arg0: memref>) -> memref> { %0 = bufferization.to_tensor %arg0 : memref> to tensor - %1 = bufferization.to_memref %0 : tensor to memref> + %1 = bufferization.to_buffer %0 : tensor to memref> return %1 : memref> } // CHECK-SAME: %[[M:.*]]: memref>) // CHECK-SAME: -> memref> { // CHECK-NOT: bufferization.to_tensor -// CHECK-NOT: bufferization.to_memref +// CHECK-NOT: bufferization.to_buffer // CHECK: %[[C0:.*]] = arith.constant 0 : index // CHECK: %[[DIM:.*]] = memref.dim %[[M]], %[[C0]] : memref> // CHECK: %[[ALLOC:.*]] = memref.alloc(%[[DIM]]) : memref> @@ -247,26 +247,26 @@ func.func @clone_and_preceding_dealloc(%arg0: memref) -> memref<32xf32> { // ----- -// CHECK-LABEL: func @tensor_cast_to_memref +// CHECK-LABEL: func @tensor_cast_to_buffer // CHECK-SAME: %[[ARG0:.+]]: tensor<4x6x16x32xi8> -func.func @tensor_cast_to_memref(%arg0 : tensor<4x6x16x32xi8>) -> +func.func @tensor_cast_to_buffer(%arg0 : tensor<4x6x16x32xi8>) -> memref { %0 = tensor.cast %arg0 : tensor<4x6x16x32xi8> to tensor - %1 = bufferization.to_memref %0 : tensor to memref + %1 = bufferization.to_buffer %0 : tensor to memref return %1 : memref } -// CHECK: %[[M:.+]] = bufferization.to_memref %[[ARG0]] : 
tensor<4x6x16x32xi8> +// CHECK: %[[M:.+]] = bufferization.to_buffer %[[ARG0]] : tensor<4x6x16x32xi8> // CHECK: %[[M1:.+]] = memref.cast %[[M]] // CHECK-SAME: memref<4x6x16x32xi8> to memref // CHECK: return %[[M1]] : memref // ----- -// Folding of memref.load(to_memref(%v, %idxs)) -> tensor.extract(%v, %idx) +// Folding of memref.load(to_buffer(%v, %idxs)) -> tensor.extract(%v, %idx) // CHECK-LABEL: func @load_from_buffer_cast( func.func @load_from_buffer_cast(%arg0: index, %arg1: index, %arg2: tensor) -> f32 { - %0 = bufferization.to_memref %arg2 : tensor to memref + %0 = bufferization.to_buffer %arg2 : tensor to memref %1 = memref.load %0[%arg0, %arg1] : memref return %1 : f32 } diff --git a/mlir/test/Dialect/Bufferization/ops.mlir b/mlir/test/Dialect/Bufferization/ops.mlir index 7b6a6f492d06..fc6df4a09f70 100644 --- a/mlir/test/Dialect/Bufferization/ops.mlir +++ b/mlir/test/Dialect/Bufferization/ops.mlir @@ -11,12 +11,12 @@ func.func @test_clone(%buf : memref<*xf32>) -> memref<*xf32> { return %clone : memref<*xf32> } -// CHECK-LABEL: test_to_memref -func.func @test_to_memref(%arg0: tensor, %arg1: tensor<*xi64>) +// CHECK-LABEL: test_to_buffer +func.func @test_to_buffer(%arg0: tensor, %arg1: tensor<*xi64>) -> (memref (d0 + 7)>>, memref<*xi64, 1>) { - %0 = bufferization.to_memref %arg0 + %0 = bufferization.to_buffer %arg0 : tensor to memref (d0 + 7)>> - %1 = bufferization.to_memref %arg1 + %1 = bufferization.to_buffer %arg1 : tensor<*xi64> to memref<*xi64, 1> return %0, %1 : memref (d0 + 7)>>, memref<*xi64, 1> } diff --git a/mlir/test/Dialect/ControlFlow/one-shot-bufferize.mlir b/mlir/test/Dialect/ControlFlow/one-shot-bufferize.mlir index f5c9f81a1899..e37b63d01378 100644 --- a/mlir/test/Dialect/ControlFlow/one-shot-bufferize.mlir +++ b/mlir/test/Dialect/ControlFlow/one-shot-bufferize.mlir @@ -3,7 +3,7 @@ // CHECK-NO-FUNC-LABEL: func @br( // CHECK-NO-FUNC-SAME: %[[t:.*]]: tensor<5xf32>) -// CHECK-NO-FUNC: %[[m:.*]] = bufferization.to_memref %[[t]] : tensor<5xf32> to memref<5xf32, strided<[?], offset: ?>> +// CHECK-NO-FUNC: %[[m:.*]] = bufferization.to_buffer %[[t]] : tensor<5xf32> to memref<5xf32, strided<[?], offset: ?>> // CHECK-NO-FUNC: %[[r:.*]] = scf.execute_region -> memref<5xf32, strided<[?], offset: ?>> { // CHECK-NO-FUNC: cf.br ^[[block:.*]](%[[m]] // CHECK-NO-FUNC: ^[[block]](%[[arg1:.*]]: memref<5xf32, strided<[?], offset: ?>>): @@ -23,7 +23,7 @@ func.func @br(%t: tensor<5xf32>) { // CHECK-NO-FUNC-LABEL: func @cond_br( // CHECK-NO-FUNC-SAME: %[[t1:.*]]: tensor<5xf32>, -// CHECK-NO-FUNC: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor<5xf32> to memref<5xf32, strided<[?], offset: ?>> +// CHECK-NO-FUNC: %[[m1:.*]] = bufferization.to_buffer %[[t1]] : tensor<5xf32> to memref<5xf32, strided<[?], offset: ?>> // CHECK-NO-FUNC: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32> // CHECK-NO-FUNC: %[[r:.*]] = scf.execute_region -> memref<5xf32, strided<[?], offset: ?>> { // CHECK-NO-FUNC: cf.cond_br %{{.*}}, ^[[block1:.*]](%[[m1]] : {{.*}}), ^[[block2:.*]](%[[alloc]] : {{.*}}) diff --git a/mlir/test/Dialect/Linalg/bufferize.mlir b/mlir/test/Dialect/Linalg/bufferize.mlir index 530badebd5c7..1c6cb88fa028 100644 --- a/mlir/test/Dialect/Linalg/bufferize.mlir +++ b/mlir/test/Dialect/Linalg/bufferize.mlir @@ -3,7 +3,7 @@ #map0 = affine_map<(d0) -> (d0)> // In-depth checking of a basic case, this is testing -// - bufferization.to_memref / bufferization.to_tensor materializations are +// - bufferization.to_buffer / bufferization.to_tensor materializations are // properly inserted // - 
payload is correctly carried over // - affine maps are correctly carried over @@ -12,7 +12,7 @@ // CHECK: #map = affine_map<(d0) -> (d0)> // CHECK-LABEL: func @basic( // CHECK-SAME: %[[TENSOR:.*]]: tensor<4xf32>) -> tensor<4xf32> { -// CHECK-DAG: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor<4xf32> to memref<4xf32> +// CHECK-DAG: %[[MEMREF:.*]] = bufferization.to_buffer %[[TENSOR]] : tensor<4xf32> to memref<4xf32> // CHECK-DAG: %[[RESULT_MEMREF:.*]] = memref.alloc() {{.*}} : memref<4xf32> // CHECK: linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel"]} // CHECK-SAME: ins(%[[MEMREF]] : memref<4xf32>) @@ -46,7 +46,7 @@ func.func @basic(%arg0: tensor<4xf32>) -> tensor<4xf32> { // CHECK: #map = affine_map<(d0) -> (d0)> // CHECK-LABEL: func @empty_tensor( // CHECK-SAME: %[[IN:.*]]: tensor, %[[SIZE:.*]]: index) -// CHECK-DAG: %[[MEMREF:.*]] = bufferization.to_memref %[[IN]] : tensor to memref +// CHECK-DAG: %[[MEMREF:.*]] = bufferization.to_buffer %[[IN]] : tensor to memref // CHECK-DAG: %[[OUT_BUF:.*]] = memref.alloc(%[[SIZE]]) {{.*}} : memref // CHECK: linalg.generic // CHECK-SAME: ins(%[[MEMREF]] : memref) @@ -105,7 +105,7 @@ func.func @multiple_results(%arg0: tensor<4xf32>) -> (tensor<4xf32>, tensor<4xf3 // CHECK-DAG: %[[DIM1:.*]] = tensor.dim %[[ARG]], %[[C1]] : tensor // CHECK-DAG: %[[RESULT0:.*]] = memref.alloc(%[[DIM0]], %[[DIM1]]) {{.*}} : memref // CHECK-DAG: %[[RESULT1:.*]] = memref.alloc(%[[DIM0]], %[[DIM1]]) {{.*}} : memref -// CHECK-DAG: %[[MEMREF_ARG:.*]] = bufferization.to_memref %[[ARG]] : tensor to memref +// CHECK-DAG: %[[MEMREF_ARG:.*]] = bufferization.to_buffer %[[ARG]] : tensor to memref // CHECK: linalg.generic // CHECK-SAME: ins(%[[MEMREF_ARG]] : memref) // CHECK-SAME: outs(%[[RESULT0]], %[[RESULT1]] : memref, memref) @@ -141,8 +141,8 @@ func.func @dynamic_results(%arg0: tensor) // CHECK-SAME: %[[ARG0_TENSOR:.*]]: tensor<2x3x4xvector<3x4xi4>>, // CHECK-SAME: %[[ARG1_TENSOR:.*]]: tensor<3x2xf32>) -> tensor<3x2xf32> { // CHECK-DAG: %[[INIT_BUFFER:.*]] = memref.alloc() {{.*}} : memref<3x2xf32> -// CHECK-DAG: %[[ARG0_MEMREF:.*]] = bufferization.to_memref %[[ARG0_TENSOR]] : tensor<2x3x4xvector<3x4xi4>> -// CHECK-DAG: %[[ARG1_MEMREF:.*]] = bufferization.to_memref %[[ARG1_TENSOR]] : tensor<3x2xf32> +// CHECK-DAG: %[[ARG0_MEMREF:.*]] = bufferization.to_buffer %[[ARG0_TENSOR]] : tensor<2x3x4xvector<3x4xi4>> +// CHECK-DAG: %[[ARG1_MEMREF:.*]] = bufferization.to_buffer %[[ARG1_TENSOR]] : tensor<3x2xf32> // CHECK: memref.copy %[[ARG1_MEMREF]], %[[INIT_BUFFER]] : memref<3x2xf32> to memref<3x2xf32> // CHECK: linalg.generic // CHECK-SAME: ins(%[[ARG0_MEMREF]] : memref<2x3x4xvector<3x4xi4>>) @@ -194,7 +194,7 @@ func.func @bufferize_dot(%in: tensor<4xf32>, %out: tensor) -> tensor { // CHECK-LABEL: func @bufferize_softmax( // CHECK-SAME: %[[arg0:.*]]: tensor<2x16x32xf32>, %[[arg1:.*]]: tensor<2x16x32xf32> -// CHECK: %[[m0:.*]] = bufferization.to_memref %[[arg0]] +// CHECK: %[[m0:.*]] = bufferization.to_buffer %[[arg0]] // CHECK: %[[alloc:.*]] = memref.alloc() // CHECK-NOT: memref.copy // CHECK: linalg.softmax dimension(2) ins(%[[m0]] : {{.*}}) outs(%[[alloc:.*]] : {{.*}}) diff --git a/mlir/test/Dialect/Linalg/hoisting.mlir b/mlir/test/Dialect/Linalg/hoisting.mlir index 4e1035e038ca..318edca73cce 100644 --- a/mlir/test/Dialect/Linalg/hoisting.mlir +++ b/mlir/test/Dialect/Linalg/hoisting.mlir @@ -519,7 +519,7 @@ module attributes {transform.with_named_sequence} { // memory (i.e. 
`%collapsed_1` and `%collapsed_2` alias): // %acc = vector.transfer_read %collapsed_2[%c0] -// CHECK-LABEL: func.func @no_hoisting_write_to_memref +// CHECK-LABEL: func.func @no_hoisting_write_to_buffer // CHECK: scf.for {{.*}} { // CHECK: vector.transfer_read {{.*}} : memref<2xi32>, vector<1xi32> // CHECK-NEXT: vector.transfer_read {{.*}} : memref<2xi32>, vector<1xi32> @@ -527,7 +527,7 @@ module attributes {transform.with_named_sequence} { // CHECK-NEXT: vector.transfer_write {{.*}} : vector<1xi32>, memref<2xi32> // CHECK-NEXT: } -func.func @no_hoisting_write_to_memref(%rhs: i32, %arg1: vector<1xi32>) { +func.func @no_hoisting_write_to_buffer(%rhs: i32, %arg1: vector<1xi32>) { %c0_i32 = arith.constant 0 : i32 %c0 = arith.constant 0 : index %c1 = arith.constant 1 : index diff --git a/mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir b/mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir index 35cbd7725ec5..4d7ddc8a513c 100644 --- a/mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir +++ b/mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir @@ -101,7 +101,7 @@ module attributes {transform.with_named_sequence} { // CHECK-LABEL: func @tensor_pad_constant( // CHECK-SAME: %[[t:.*]]: tensor -// CHECK: %[[src:.*]] = bufferization.to_memref %[[t]] +// CHECK: %[[src:.*]] = bufferization.to_buffer %[[t]] // CHECK: %[[alloc:.*]] = memref.alloc // CHECK: %[[subview:.*]] = memref.subview %[[alloc]] // CHECK: memref.copy %[[src]], %[[subview]] @@ -130,7 +130,7 @@ module attributes {transform.with_named_sequence} { // CHECK-LABEL: func @tensor_insert( // CHECK-SAME: %[[t:.*]]: tensor -// CHECK: %[[m:.*]] = bufferization.to_memref %[[t]] +// CHECK: %[[m:.*]] = bufferization.to_buffer %[[t]] // CHECK: %[[alloc:.*]] = memref.alloc(%{{.*}}) : memref // CHECK: memref.copy %[[m]], %[[alloc]] // CHECK: memref.store %{{.*}}, %[[alloc]] diff --git a/mlir/test/Dialect/MemRef/normalize-memrefs.mlir b/mlir/test/Dialect/MemRef/normalize-memrefs.mlir index 6d20ccbf2ca0..31d418324046 100644 --- a/mlir/test/Dialect/MemRef/normalize-memrefs.mlir +++ b/mlir/test/Dialect/MemRef/normalize-memrefs.mlir @@ -360,7 +360,7 @@ func.func @neg_map() -> memref<2x3xf32, #neg> { // CHECK-LABEL: func @memref_with_strided_offset func.func @memref_with_strided_offset(%arg0: tensor<128x512xf32>, %arg1: index, %arg2: index) -> tensor<16x512xf32> { %c0 = arith.constant 0 : index - %0 = bufferization.to_memref %arg0 : tensor<128x512xf32> to memref<128x512xf32, strided<[?, ?], offset: ?>> + %0 = bufferization.to_buffer %arg0 : tensor<128x512xf32> to memref<128x512xf32, strided<[?, ?], offset: ?>> %subview = memref.subview %0[%arg2, 0] [%arg1, 512] [1, 1] : memref<128x512xf32, strided<[?, ?], offset: ?>> to memref> // CHECK: %{{.*}} = memref.cast %{{.*}} : memref> to memref<16x512xf32, strided<[?, ?], offset: ?>> %cast = memref.cast %subview : memref> to memref<16x512xf32, strided<[?, ?], offset: ?>> diff --git a/mlir/test/Dialect/SCF/bufferize.mlir b/mlir/test/Dialect/SCF/bufferize.mlir index 6c08d9f68e8a..20a640776b56 100644 --- a/mlir/test/Dialect/SCF/bufferize.mlir +++ b/mlir/test/Dialect/SCF/bufferize.mlir @@ -4,8 +4,8 @@ // CHECK-SAME: %[[PRED:.*]]: i1, // CHECK-SAME: %[[TRUE_TENSOR:.*]]: tensor, // CHECK-SAME: %[[FALSE_TENSOR:.*]]: tensor) -> tensor { -// CHECK-DAG: %[[TRUE_MEMREF:.*]] = bufferization.to_memref %[[TRUE_TENSOR]] : tensor to memref -// CHECK-DAG: %[[FALSE_MEMREF:.*]] = bufferization.to_memref %[[FALSE_TENSOR]] : tensor to memref +// CHECK-DAG: 
%[[TRUE_MEMREF:.*]] = bufferization.to_buffer %[[TRUE_TENSOR]] : tensor to memref +// CHECK-DAG: %[[FALSE_MEMREF:.*]] = bufferization.to_buffer %[[FALSE_TENSOR]] : tensor to memref // CHECK: %[[RESULT_MEMREF:.*]] = scf.if %[[PRED]] -> (memref) { // CHECK: scf.yield %[[TRUE_MEMREF]] : memref // CHECK: } else { @@ -29,7 +29,7 @@ func.func @if(%pred: i1, %true_val: tensor, %false_val: tensor) -> // CHECK-SAME: %[[TENSOR:.*]]: tensor, // CHECK-SAME: %[[LB:.*]]: index, %[[UB:.*]]: index, // CHECK-SAME: %[[STEP:.*]]: index) -> tensor { -// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor to memref +// CHECK: %[[MEMREF:.*]] = bufferization.to_buffer %[[TENSOR]] : tensor to memref // Note: scf.for iter_args always bufferize to a memory write. This could be // optimized by analyzing the loop body. // CHECK: %[[MEMREF_COPY:.*]] = memref.alloc() @@ -70,7 +70,7 @@ func.func @if_correct_recursive_legalization_behavior(%pred: i1, %tensor: tensor // CHECK-LABEL: func @for_correct_recursive_legalization_behavior( // CHECK-SAME: %[[TENSOR:.*]]: tensor, // CHECK-SAME: %[[INDEX:.*]]: index) -> tensor { -// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor to memref +// CHECK: %[[MEMREF:.*]] = bufferization.to_buffer %[[TENSOR]] : tensor to memref // Note: scf.for iter_args always bufferize to a memory write. This could be // optimized by analyzing the loop body. // CHECK: %[[MEMREF_COPY:.*]] = memref.alloc() @@ -78,7 +78,7 @@ func.func @if_correct_recursive_legalization_behavior(%pred: i1, %tensor: tensor // CHECK: %[[RESULT:.*]] = scf.for %{{.*}} = %[[INDEX]] to %[[INDEX]] step %[[INDEX]] iter_args(%[[MEMREF_ITER:.*]] = %[[MEMREF_COPY]]) -> (memref) { // CHECK: %[[TENSOR_ITER:.*]] = bufferization.to_tensor %[[MEMREF_ITER]] : memref // CHECK: %[[TENSOR_MUNGED:.*]] = "test.munge_tensor"(%[[TENSOR_ITER]]) : (tensor) -> tensor -// CHECK: %[[MEMREF_MUNGED:.*]] = bufferization.to_memref %[[TENSOR_MUNGED]] : tensor to memref +// CHECK: %[[MEMREF_MUNGED:.*]] = bufferization.to_buffer %[[TENSOR_MUNGED]] : tensor to memref // CHECK: scf.yield %[[MEMREF_MUNGED]] : memref // CHECK: } // CHECK: %[[TENSOR:.*]] = bufferization.to_tensor %[[RESULT]] : memref @@ -96,7 +96,7 @@ func.func @for_correct_recursive_legalization_behavior(%arg0: tensor, %inde // CHECK-LABEL: func @bufferize_while( // CHECK-SAME: %[[ARG0:.*]]: i64, %[[ARG1:.*]]: i64, %[[ARG2:.*]]: tensor -// CHECK: %[[M:.*]] = bufferization.to_memref %[[ARG2]] : tensor to memref +// CHECK: %[[M:.*]] = bufferization.to_buffer %[[ARG2]] : tensor to memref // Note: scf.while iter_args always bufferize to a memory write. This could be // optimized by analyzing the loop body. 
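// A minimal illustrative sketch of that write (assumed types and value names,
// not taken from this test): the loop's init tensor is materialized as a
// buffer, copied into a fresh allocation, and that copy is what the iter_args
// then mutate:
//   %m    = bufferization.to_buffer %init : tensor<?xf32> to memref<?xf32>
//   %copy = memref.alloc(%dim) : memref<?xf32>
//   memref.copy %m, %copy : memref<?xf32> to memref<?xf32>
//   %r    = scf.while (%it = %copy) : (memref<?xf32>) -> memref<?xf32> { ... }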
// CHECK: %[[MEMREF_COPY:.*]] = memref.alloc() diff --git a/mlir/test/Dialect/SCF/one-shot-bufferize-encodings.mlir b/mlir/test/Dialect/SCF/one-shot-bufferize-encodings.mlir index 709943e59658..6b6207395f14 100644 --- a/mlir/test/Dialect/SCF/one-shot-bufferize-encodings.mlir +++ b/mlir/test/Dialect/SCF/one-shot-bufferize-encodings.mlir @@ -13,14 +13,14 @@ func.func @scf_for_iter_arg(%arg0: tensor<128xf32, 1>, %arg1: index, %arg2: inde // CHECK-LABEL: func.func @scf_for_iter_arg // CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32, 1 : i64>, %[[arg1:.+]]: index, %[[arg2:.+]]: index, %[[arg3:.+]]: index) -// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> +// CHECK: %[[v0:.+]] = bufferization.to_buffer %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> // CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 1> // CHECK: memref.copy %[[v0]], %[[alloc]] : memref<128xf32, strided<[?], offset: ?>, 1> to memref<128xf32, 1> // CHECK: %[[cast:.+]] = memref.cast %[[alloc]] : memref<128xf32, 1> to memref<128xf32, strided<[?], offset: ?>, 1> // CHECK: %[[v1:.+]] = scf.for %{{.+}} = %[[arg1]] to %[[arg2]] step %[[arg3]] iter_args(%[[arg6:.+]] = %[[cast]]) -> (memref<128xf32, strided<[?], offset: ?>, 1>) // CHECK-NEXT: %[[v3:.+]] = bufferization.to_tensor %[[arg6]] : memref<128xf32, strided<[?], offset: ?>, 1> to tensor<128xf32, 1 : i64> // CHECK-NEXT: %[[v4:.+]] = "some.use"(%[[v3]]) : (tensor<128xf32, 1 : i64>) -> tensor<128xf32, 1 : i64> -// CHECK-NEXT: %[[v5:.+]] = bufferization.to_memref %[[v4]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> +// CHECK-NEXT: %[[v5:.+]] = bufferization.to_buffer %[[v4]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> // CHECK-NEXT: scf.yield %[[v5]] : memref<128xf32, strided<[?], offset: ?>, 1> // CHECK: %[[v2:.+]] = bufferization.to_tensor %[[v1]] : memref<128xf32, strided<[?], offset: ?>, 1> to tensor<128xf32, 1 : i64> // CHECK: return %[[v2]] : tensor<128xf32, 1 : i64> @@ -49,7 +49,7 @@ func.func @scf_forall( // CHECK: scf.forall // CHECK: %[[v2:.+]] = bufferization.to_tensor %{{.+}} : memref to tensor // CHECK: %[[v3:.+]] = "some.use"(%[[v2]]) : (tensor) -> tensor -// CHECK: bufferization.to_memref %[[v3]] : tensor to memref, 1> +// CHECK: bufferization.to_buffer %[[v3]] : tensor to memref, 1> // CHECK: %[[v1:.+]] = bufferization.to_tensor %{{.+}} : memref to tensor // CHECK: return %[[v1]] : tensor @@ -65,7 +65,7 @@ func.func @scf_execute_region(%arg0: tensor<128xf32, 1>) -> tensor<128xf32, 1> { // CHECK-LABEL: func.func @scf_execute_region // CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32, 1 : i64>) -// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> +// CHECK: %[[v0:.+]] = bufferization.to_buffer %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> // CHECK: %[[v1:.+]] = scf.execute_region -> memref<128xf32, strided<[?], offset: ?>, 1> // CHECK: scf.yield %[[v0]] : memref<128xf32, strided<[?], offset: ?>, 1> // CHECK: %[[v2:.+]] = bufferization.to_tensor %[[v1]] : memref<128xf32, strided<[?], offset: ?>, 1> to tensor<128xf32, 1 : i64> diff --git a/mlir/test/Dialect/Shape/bufferize.mlir b/mlir/test/Dialect/Shape/bufferize.mlir index 02e147d917d0..f6788f845a83 100644 --- a/mlir/test/Dialect/Shape/bufferize.mlir +++ b/mlir/test/Dialect/Shape/bufferize.mlir @@ -6,7 +6,7 @@ // 
CHECK: %[[WTRUE:.*]] = shape.const_witness true // CHECK: %[[MEMREF:.*]] = shape.assuming %[[WTRUE]] -> (memref<2xf16>) { // CHECK: %[[TENSOR_VAL:.*]] = "test.source"() : () -> tensor<2xf16> -// CHECK: %[[YIELDED_MEMREF:.*]] = bufferization.to_memref %[[TENSOR_VAL]] : tensor<2xf16> to memref<2xf16> +// CHECK: %[[YIELDED_MEMREF:.*]] = bufferization.to_buffer %[[TENSOR_VAL]] : tensor<2xf16> to memref<2xf16> // CHECK: shape.assuming_yield %[[YIELDED_MEMREF]] : memref<2xf16> // CHECK: } // CHECK: %[[TENSOR:.*]] = bufferization.to_tensor %[[MEMREF:.*]] : memref<2xf16> diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul24_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul24_lib.mlir index 6d98667e7756..e20345f27b11 100644 --- a/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul24_lib.mlir +++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul24_lib.mlir @@ -14,19 +14,19 @@ // CHECK-SAME: %[[VAL_2:.*2]]: tensor) -> tensor { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_5:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK: %[[VAL_5:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK: %[[VAL_6:.*]] = gpu.wait async // CHECK: %[[VAL_7:.*]] = memref.dim %[[VAL_5]], %[[VAL_3]] : memref // CHECK: %[[VAL_8:.*]] = memref.dim %[[VAL_5]], %[[VAL_4]] : memref // CHECK: %[[VAL_9:.*]], %[[VAL_10:.*]] = gpu.alloc async {{\[}}%[[VAL_6]]] (%[[VAL_7]], %[[VAL_8]]) : memref // CHECK: %[[VAL_11:.*]] = gpu.memcpy async {{\[}}%[[VAL_10]]] %[[VAL_9]], %[[VAL_5]] : memref, memref -// CHECK: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK: %[[VAL_13:.*]] = gpu.wait async // CHECK: %[[VAL_14:.*]] = memref.dim %[[VAL_12]], %[[VAL_3]] : memref // CHECK: %[[VAL_15:.*]] = memref.dim %[[VAL_12]], %[[VAL_4]] : memref // CHECK: %[[VAL_16:.*]], %[[VAL_17:.*]] = gpu.alloc async {{\[}}%[[VAL_13]]] (%[[VAL_14]], %[[VAL_15]]) : memref // CHECK: %[[VAL_18:.*]] = gpu.memcpy async {{\[}}%[[VAL_17]]] %[[VAL_16]], %[[VAL_12]] : memref, memref -// CHECK: %[[VAL_19:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref +// CHECK: %[[VAL_19:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref // CHECK: %[[VAL_20:.*]] = gpu.wait async // CHECK: %[[VAL_21:.*]] = memref.dim %[[VAL_19]], %[[VAL_3]] : memref // CHECK: %[[VAL_22:.*]] = memref.dim %[[VAL_19]], %[[VAL_4]] : memref diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul_lib.mlir index 63c308a3d5e6..01906f4c4517 100644 --- a/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul_lib.mlir +++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul_lib.mlir @@ -30,13 +30,13 @@ // CHECK: %[[VAL_23:.*]] = memref.dim %[[VAL_11]], %[[VAL_3]] : memref // CHECK: %[[VAL_24:.*]], %[[VAL_25:.*]] = gpu.alloc async {{\[}}%[[VAL_22]]] (%[[VAL_23]]) : memref // CHECK: %[[VAL_26:.*]] = gpu.memcpy async {{\[}}%[[VAL_25]]] %[[VAL_24]], %[[VAL_11]] : memref, memref -// CHECK: %[[VAL_27:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK: %[[VAL_27:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK: %[[VAL_28:.*]] = gpu.wait async // CHECK: %[[VAL_29:.*]] = memref.dim %[[VAL_27]], %[[VAL_3]] : memref // CHECK: %[[VAL_30:.*]] = memref.dim %[[VAL_27]], %[[VAL_4]] : memref // CHECK: %[[VAL_31:.*]], %[[VAL_32:.*]] = gpu.alloc async {{\[}}%[[VAL_28]]] (%[[VAL_29]], %[[VAL_30]]) : 
memref // CHECK: %[[VAL_33:.*]] = gpu.memcpy async {{\[}}%[[VAL_32]]] %[[VAL_31]], %[[VAL_27]] : memref, memref -// CHECK: %[[VAL_34:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref +// CHECK: %[[VAL_34:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref // CHECK: %[[VAL_35:.*]] = gpu.wait async // CHECK: %[[VAL_36:.*]] = memref.dim %[[VAL_34]], %[[VAL_3]] : memref // CHECK: %[[VAL_37:.*]] = memref.dim %[[VAL_34]], %[[VAL_4]] : memref diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir index 088e468cee79..dea71fa03c77 100644 --- a/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir +++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir @@ -30,12 +30,12 @@ module { // CHECK: %[[VAL_22:.*]] = memref.dim %[[VAL_10]], %[[VAL_3]] : memref // CHECK: %[[VAL_23:.*]], %[[VAL_24:.*]] = gpu.alloc async {{\[}}%[[VAL_21]]] (%[[VAL_22]]) : memref // CHECK: %[[VAL_25:.*]] = gpu.memcpy async {{\[}}%[[VAL_24]]] %[[VAL_23]], %[[VAL_10]] : memref, memref -// CHECK: %[[VAL_26:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK: %[[VAL_26:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK: %[[VAL_27:.*]] = gpu.wait async // CHECK: %[[VAL_28:.*]] = memref.dim %[[VAL_26]], %[[VAL_3]] : memref // CHECK: %[[VAL_29:.*]], %[[VAL_30:.*]] = gpu.alloc async {{\[}}%[[VAL_27]]] (%[[VAL_28]]) : memref // CHECK: %[[VAL_31:.*]] = gpu.memcpy async {{\[}}%[[VAL_30]]] %[[VAL_29]], %[[VAL_26]] : memref, memref -// CHECK: %[[VAL_32:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref +// CHECK: %[[VAL_32:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref // CHECK: %[[VAL_33:.*]] = gpu.wait async // CHECK: %[[VAL_34:.*]] = memref.dim %[[VAL_32]], %[[VAL_3]] : memref // CHECK: %[[VAL_35:.*]], %[[VAL_36:.*]] = gpu.alloc async {{\[}}%[[VAL_33]]] (%[[VAL_34]]) : memref diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir index 1058bc03fe9c..6675df2be0c5 100644 --- a/mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir +++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir @@ -28,11 +28,11 @@ // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 8 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK: %[[VAL_5:.*]] = sparse_tensor.number_of_entries %[[VAL_0]] : tensor<8x8xf64, #sparse{{[0-9]*}}> -// CHECK: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64> // CHECK: %[[VAL_7:.*]] = gpu.wait async // CHECK: %[[VAL_8:.*]], %[[VAL_9:.*]] = gpu.alloc async {{\[}}%[[VAL_7]]] () : memref<8x8xf64> // CHECK: %[[VAL_10:.*]] = gpu.memcpy async {{\[}}%[[VAL_9]]] %[[VAL_8]], %[[VAL_6]] : memref<8x8xf64>, memref<8x8xf64> -// CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<8x8xf64> to memref<8x8xf64> // CHECK: %[[VAL_12:.*]] = gpu.wait async // CHECK: %[[VAL_13:.*]], %[[VAL_14:.*]] = gpu.alloc async {{\[}}%[[VAL_12]]] () : memref<8x8xf64> // CHECK: %[[VAL_15:.*]] = gpu.memcpy async {{\[}}%[[VAL_14]]] %[[VAL_13]], %[[VAL_11]] : memref<8x8xf64>, memref<8x8xf64> diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_sddmm_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_sddmm_lib.mlir index 32741086b9e6..7b7657a0e9ba 100644 --- 
a/mlir/test/Dialect/SparseTensor/GPU/gpu_sddmm_lib.mlir +++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_sddmm_lib.mlir @@ -30,13 +30,13 @@ // CHECK: %[[VAL_8:.*]] = tensor.dim %[[VAL_1]], %[[VAL_3]] : tensor // CHECK: %[[VAL_9:.*]] = tensor.dim %[[VAL_1]], %[[VAL_4]] : tensor // CHECK: %[[VAL_10:.*]] = tensor.dim %[[VAL_2]], %[[VAL_4]] : tensor -// CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK: %[[VAL_12:.*]] = gpu.wait async // CHECK: %[[VAL_13:.*]] = memref.dim %[[VAL_11]], %[[VAL_3]] : memref // CHECK: %[[VAL_14:.*]] = memref.dim %[[VAL_11]], %[[VAL_4]] : memref // CHECK: %[[VAL_15:.*]], %[[VAL_16:.*]] = gpu.alloc async {{\[}}%[[VAL_12]]] (%[[VAL_13]], %[[VAL_14]]) : memref // CHECK: %[[VAL_17:.*]] = gpu.memcpy async {{\[}}%[[VAL_16]]] %[[VAL_15]], %[[VAL_11]] : memref, memref -// CHECK: %[[VAL_18:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref +// CHECK: %[[VAL_18:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref // CHECK: %[[VAL_19:.*]] = gpu.wait async // CHECK: %[[VAL_20:.*]] = memref.dim %[[VAL_18]], %[[VAL_3]] : memref // CHECK: %[[VAL_21:.*]] = memref.dim %[[VAL_18]], %[[VAL_4]] : memref diff --git a/mlir/test/Dialect/SparseTensor/constant_index_map.mlir b/mlir/test/Dialect/SparseTensor/constant_index_map.mlir index 857967bcf521..cf1eb3e9e44f 100644 --- a/mlir/test/Dialect/SparseTensor/constant_index_map.mlir +++ b/mlir/test/Dialect/SparseTensor/constant_index_map.mlir @@ -14,8 +14,8 @@ // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_5:.*]] = tensor.empty() : tensor<77xi1, #{{.*}}> -// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<1x77xi1> -// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<1x77xi1> +// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<1x77xi1> +// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<1x77xi1> // CHECK: %[[VAL_8:.*]] = scf.for %[[VAL_9:.*]] = %[[VAL_3]] to %[[VAL_2]] step %[[VAL_4]] iter_args(%[[VAL_10:.*]] = %[[VAL_5]]) -> (tensor<77xi1, #{{.*}}>) { // CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]], %[[VAL_9]]] : memref<1x77xi1> // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_3]], %[[VAL_9]]] : memref<1x77xi1> diff --git a/mlir/test/Dialect/SparseTensor/dense.mlir b/mlir/test/Dialect/SparseTensor/dense.mlir index 5ed1558a5316..c7022706f1e0 100644 --- a/mlir/test/Dialect/SparseTensor/dense.mlir +++ b/mlir/test/Dialect/SparseTensor/dense.mlir @@ -40,7 +40,7 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index // CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] { // CHECK: %[[VAL_11:.*]] = arith.muli %[[VAL_9]], %[[VAL_4]] : index // CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] { @@ -79,7 +79,7 @@ func.func @dense1(%arga: tensor<32x16xf32, #DenseMatrix>, // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 16 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = 
arith.constant 1 : index -// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] { // CHECK: %[[VAL_11:.*]] = arith.muli %[[VAL_9]], %[[VAL_4]] : index @@ -122,7 +122,7 @@ func.func @dense2(%arga: tensor<32x16xf32>, // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 16 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] { // CHECK: %[[VAL_11:.*]] = arith.muli %[[VAL_9]], %[[VAL_4]] : index diff --git a/mlir/test/Dialect/SparseTensor/fuse_sparse_pad_with_consumer.mlir b/mlir/test/Dialect/SparseTensor/fuse_sparse_pad_with_consumer.mlir index 275f7f2ff25f..d828afe13c62 100644 --- a/mlir/test/Dialect/SparseTensor/fuse_sparse_pad_with_consumer.mlir +++ b/mlir/test/Dialect/SparseTensor/fuse_sparse_pad_with_consumer.mlir @@ -30,7 +30,7 @@ // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<4x4xf32, #sparse> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<4x4xf32, #sparse> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<4x4xf32, #sparse> to memref -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_10]] : +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_10]] : // CHECK-DAG: linalg.fill ins(%[[VAL_8]] : f32) outs(%[[VAL_14]] : memref<8x8xf32>) // CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_5]] { // CHECK: %[[VAL_16:.*]] = arith.subi %[[VAL_15]], %[[VAL_7]] : index diff --git a/mlir/test/Dialect/SparseTensor/sorted_coo.mlir b/mlir/test/Dialect/SparseTensor/sorted_coo.mlir index 58f182dbdc44..81d300e851ec 100644 --- a/mlir/test/Dialect/SparseTensor/sorted_coo.mlir +++ b/mlir/test/Dialect/SparseTensor/sorted_coo.mlir @@ -101,7 +101,7 @@ func.func @sparse_scale(%argx: tensor) -> tensor to memref> // C_HECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref> // C_HECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref -// C_HECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64> +// C_HECK: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf64> to memref<32xf64> // C_HECK: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // C_HECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref // C_HECK: %[[VAL_13:.*]] = scf.while (%[[VAL_14:.*]] = %[[VAL_11]]) : (index) -> index { @@ -170,7 +170,7 @@ func.func @matvec(%arga: tensor<32x64xf64, #SortedCOO>, // C_HECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref> // C_HECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates 
%[[VAL_1]] {level = 1 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref> // C_HECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref -// C_HECK: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x64xf64> to memref<32x64xf64> +// C_HECK: %[[VAL_15:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x64xf64> to memref<32x64xf64> // C_HECK: linalg.fill ins(%[[VAL_4]] : f64) outs(%[[VAL_15]] : memref<32x64xf64>) // C_HECK: %[[VAL_16:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref // C_HECK: %[[VAL_17:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_1d.mlir b/mlir/test/Dialect/SparseTensor/sparse_1d.mlir index 003dcc6708d6..a2f3f7704ddd 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_1d.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_1d.mlir @@ -21,7 +21,7 @@ // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_8]] : memref<32xf32>) // CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_9]]] : memref @@ -51,7 +51,7 @@ func.func @add_d(%arga: tensor<32xf32, #DV>, %argb: f32, %argx: tensor<32xf32>) // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK: %[[VAL_INITTENSOR:.*]] = tensor.empty() : tensor<32xf32> // CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_INITTENSOR]] : tensor<32xf32> to memref<32xf32> +// CHECK: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_INITTENSOR]] : tensor<32xf32> to memref<32xf32> // CHECK: linalg.fill ins(%[[VAL_3]] : f32) outs(%[[VAL_7]] : memref<32xf32>) // CHECK: scf.for %[[VAL_8:.*]] = %[[VAL_4]] to %[[VAL_2]] step %[[VAL_5]] { // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_8]]] : memref @@ -81,7 +81,7 @@ func.func @add_d_init(%arga: tensor<32xf32, #DV>, %argb: f32) -> tensor<32xf32> // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_8]] : memref<32xf32>) // CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_9]]] : memref @@ -115,7 +115,7 @@ func.func @mul_d(%arga: tensor<32xf32, #DV>, %argb: f32, %argx: tensor<32xf32>) // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_11]] : memref<32xf32>) // CHECK: %[[VAL_14:.*]]:2 = scf.while 
(%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) { // CHECK: %[[VAL_17:.*]] = arith.cmpi ult, %[[VAL_15]], %[[VAL_13]] : index @@ -165,7 +165,7 @@ func.func @add_s(%arga: tensor<32xf32, #SV>, %argb: f32, %argx: tensor<32xf32>) // CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] // CHECK-DAG: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-DAG: %[[VAL_10:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_8]] : memref<32xf32>) @@ -205,7 +205,7 @@ func.func @repeated_add_s(%arga: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_9]] : memref<32xf32>) // CHECK-DAG: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK-DAG: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref @@ -247,8 +247,8 @@ func.func @mul_s(%arga: tensor<32xf32, #SV>, %argb: f32, %argx: tensor<32xf32>) // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf32> to memref<32xf32> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf32> to memref<32xf32> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_9]] : memref<32xf32>) // CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref @@ -278,8 +278,8 @@ func.func @add_dd(%arga: tensor<32xf32, #DV>, %argb: tensor<32xf32>, %argx: tens // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf32> to memref<32xf32> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf32> to memref<32xf32> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_9]] : memref<32xf32>) // CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // 
CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref @@ -309,11 +309,11 @@ func.func @mul_dd(%arga: tensor<32xf32, #DV>, %argb: tensor<32xf32>, %argx: tens // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant true // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<32xf32> to memref<32xf32> +// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<32xf32> to memref<32xf32> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_12]] : memref<32xf32>) // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref // CHECK-DAG: %[[VAL_14:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_6]]] : memref @@ -366,11 +366,11 @@ func.func @add_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32, #SV>, %argx: tens // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<32xf32> to memref<32xf32> +// CHECK-DAG: %[[VAL_5:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<32xf32> to memref<32xf32> // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_10]] : memref<32xf32>) // CHECK-DAG: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]]] : memref // CHECK-DAG: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref @@ -406,8 +406,8 @@ func.func @mul_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32, #SV>, %argx: tens // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf32> to memref<32xf32> -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf32> to memref<32xf32> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_12]] : memref<32xf32>) // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref // CHECK-DAG: %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] 
: memref @@ -463,8 +463,8 @@ func.func @add_sd(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32>, %argx: tens // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf32> to memref<32xf32> -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf32> to memref<32xf32> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_10]] : memref<32xf32>) // CHECK-DAG: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK-DAG: %[[VAL_12:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref @@ -500,7 +500,7 @@ func.func @mul_sd(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32>, %argx: tens // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_12]] : memref<32xf32>) // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK-DAG: %[[VAL_14:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref @@ -582,7 +582,7 @@ func.func @add_ss(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32, #SV>, %argx: // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_12]] : memref<32xf32>) // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK-DAG: %[[VAL_14:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref @@ -643,7 +643,7 @@ func.func @mul_ss(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32, #SV>, %argx: // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_3]] +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_3]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_13]] : memref<16xf32>) // CHECK-DAG: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK-DAG: %[[VAL_15:.*]] = 
memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -735,7 +735,7 @@ func.func @two_way_inv(%arga: tensor<16xf32, #SV>, %argb: tensor<16xf32, #SV>, % // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_3]] +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_3]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_13]] : memref<16xf32>) // CHECK-DAG: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK-DAG: %[[VAL_15:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -830,7 +830,7 @@ func.func @two_way_inv_alt(%arga: tensor<16xf32, #SV>, // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor to memref // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_0]] : tensor to memref -// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK-DAG: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-DAG: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref // CHECK-DAG: %[[VAL_10:.*]] = memref.load %[[VAL_6]][] : memref @@ -875,7 +875,7 @@ func.func @sum_reduction(%arga: tensor, %argx: tensor) -> tenso // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_11]][] : memref // CHECK-DAG: %[[VAL_14:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK-DAG: %[[VAL_15:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref @@ -977,11 +977,11 @@ func.func @sum_reduction_ss(%arga: tensor<16xf32, #SV>, // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_3]] : tensor to memref +// 
CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_3]] : tensor to memref // CHECK-DAG: %[[VAL_15:.*]] = memref.load %[[VAL_13]][] : memref // CHECK-DAG: %[[VAL_16:.*]] = memref.load %[[VAL_9]][] : memref // CHECK-DAG: %[[VAL_17:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref @@ -1089,16 +1089,16 @@ func.func @sum_reduction_inv(%arga: tensor<16xf32, #SV>, // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant true // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_3]] {level = 0 : index} : tensor to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_3]] {level = 0 : index} : tensor to memref // CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.values %[[VAL_3]] : tensor to memref // CHECK-DAG: %[[VAL_16:.*]] = tensor.dim %[[VAL_0]], %[[VAL_5]] : tensor -// CHECK-DAG: %[[VAL_18:.*]] = bufferization.to_memref %[[VAL_4]] +// CHECK-DAG: %[[VAL_18:.*]] = bufferization.to_buffer %[[VAL_4]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f64) outs(%[[VAL_18]] : memref) // CHECK-DAG: %[[VAL_19:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_5]]] : memref // CHECK-DAG: %[[VAL_20:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref @@ -1272,7 +1272,7 @@ func.func @four_tensors_op(%arga: tensor, // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 0 : index} : tensor to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 0 : index} : tensor to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_2]] : tensor to memref -// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_3]] : tensor to memref +// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_buffer %[[VAL_3]] : tensor to memref // CHECK-DAG: %[[VAL_17:.*]] = memref.load %[[VAL_15]][] : memref // CHECK-DAG: %[[VAL_18:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK-DAG: %[[VAL_19:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir index 9c34e54db6c8..faf6404a9656 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir @@ -25,8 +25,8 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] : 
tensor<32x16xf32> to memref<32x16xf32> // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_10]] : memref<32x16xf32>) // CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] { // CHECK: %[[VAL_13:.*]] = arith.muli %[[VAL_11]], %[[VAL_4]] : index @@ -62,8 +62,8 @@ func.func @add_dd(%arga: tensor<32x16xf32, #Tdd>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1> // CHECK: linalg.fill ins(%[[VAL_5]] : i1) outs(%[[VAL_10]] : memref<32x16xi1>) // CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_6]] to %[[VAL_3]] step %[[VAL_7]] { // CHECK: %[[VAL_13:.*]] = arith.muli %[[VAL_11]], %[[VAL_4]] : index @@ -98,8 +98,8 @@ func.func @cmp_dd(%arga: tensor<32x16xf32, #Tdd>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_10]] : memref<32x16xf32>) // CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] { // CHECK: %[[VAL_13:.*]] = arith.muli %[[VAL_11]], %[[VAL_4]] : index @@ -137,8 +137,8 @@ func.func @mul_dd(%arga: tensor<32x16xf32, #Tdd>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_13]] : memref<32x16xf32>) // CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_7]] { // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_14]]] : memref @@ -202,8 +202,8 @@ func.func @add_ds(%arga: tensor<32x16xf32, #Tds>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : 
index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1> // CHECK-DAG: linalg.fill ins(%[[VAL_5]] : i1) outs(%[[VAL_14]] : memref<32x16xi1>) // CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_6]] to %[[VAL_3]] step %[[VAL_7]] { // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_15]]] : memref @@ -265,8 +265,8 @@ func.func @cmp_ds(%arga: tensor<32x16xf32, #Tds>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_11]] : memref<32x16xf32>) // CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref @@ -306,8 +306,8 @@ func.func @mul_ds(%arga: tensor<32x16xf32, #Tds>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_13]] : memref<32x16xf32>) // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_6]]] : memref // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_7]]] : memref @@ -376,8 +376,8 @@ func.func @add_sd(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = 
sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1> // CHECK-DAG: linalg.fill ins(%[[VAL_5]] : i1) outs(%[[VAL_14]] : memref<32x16xi1>) // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_6]]] : memref // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_7]]] : memref @@ -444,8 +444,8 @@ func.func @cmp_sd(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_11]] : memref<32x16xf32>) // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -488,8 +488,8 @@ func.func @mul_sd(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_15]] : memref<32x16xf32>) // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_6]]] : memref // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_7]]] : memref @@ -584,8 +584,8 @@ func.func @add_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to 
memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1> +// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1> // CHECK-DAG: linalg.fill ins(%[[VAL_5]] : i1) outs(%[[VAL_16]] : memref<32x16xi1>) // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_6]]] : memref // CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_7]]] : memref @@ -679,8 +679,8 @@ func.func @cmp_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_12]] : memref<32x16xf32>) // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref @@ -726,7 +726,7 @@ func.func @mul_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_16]] : memref<32x16xf32>) // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref @@ -891,7 +891,7 @@ func.func @add_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #T // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1> +// CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xi1> to 
memref<32x16xi1> // CHECK-DAG: linalg.fill ins(%[[VAL_3]] : i1) outs(%[[VAL_17]] : memref<32x16xi1>) // CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_19:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref @@ -1166,7 +1166,7 @@ func.func @sub_ss_batched(%0: tensor<2x3xf64, #BatchedVector>, %1: tensor<2x3xf6 // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_16]] : memref<32x16xf32>) // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref @@ -1260,7 +1260,7 @@ func.func @mul_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #T // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_15]] : memref<32x16xf32>) // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_5]]] : memref // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_7]]] : memref @@ -1362,7 +1362,7 @@ func.func @add_sd_ds(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32, #T // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_13]] : memref<32x16xf32>) // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -1415,8 +1415,8 @@ func.func @mul_sd_ds(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32, #T // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<16x32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<16x32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16x32xf32, #sparse{{[0-9]*}}> to memref -// 
CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf32> to memref<32xf32> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<16xf32> to memref<16xf32> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf32> to memref<32xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<16xf32> to memref<16xf32> // CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref // CHECK-DAG: %[[VAL_14:.*]] = arith.addi %[[VAL_12]], %[[VAL_5]] : index @@ -1464,7 +1464,7 @@ func.func @matvec(%argA: tensor<16x32xf32, #Tds>, %argb: tensor<32xf32>, %argx: // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK: %[[VAL_10:.*]] = scf.for %[[VAL_11:.*]] = %[[VAL_4]] to %[[VAL_2]] step %[[VAL_3]] iter_args(%[[VAL_12:.*]] = %[[VAL_9]]) -> (f32) { // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_11]]] : memref @@ -1511,7 +1511,7 @@ func.func @sum_reduction(%arga: tensor<10x20xf32, #Tds>, %argx: tensor) -> // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.lvl %[[VAL_0]], %[[VAL_3]] : tensor -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK-DAG: linalg.fill ins(%{{.*}} : f64) outs(%[[VAL_11]] : memref) // CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_3]] to %[[VAL_8]] step %[[VAL_4]] { // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_12]]] : memref @@ -1563,9 +1563,9 @@ func.func @scale(%arga: tensor, %argx: tensor) -> tensor // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor to memref // CHECK-DAG: %[[VAL_11:.*]] = tensor.dim %[[VAL_1]], %[[VAL_4]] : tensor -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_3]] : tensor to memref +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_3]] : tensor to memref // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_15]] to %[[VAL_16]] step %[[VAL_4]] { @@ -1638,10 +1638,10 @@ func.func @sampled_dense_dense(%args: tensor, // CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 1 : index} : tensor to memref // CHECK-DAG: %[[VAL_18:.*]] = 
sparse_tensor.coordinates %[[VAL_2]] {level = 1 : index} : tensor to memref // CHECK-DAG: %[[VAL_19:.*]] = sparse_tensor.values %[[VAL_2]] : tensor to memref -// CHECK-DAG: %[[VAL_20:.*]] = bufferization.to_memref %[[VAL_3]] : tensor to memref -// CHECK-DAG: %[[VAL_21:.*]] = bufferization.to_memref %[[VAL_4]] : tensor to memref +// CHECK-DAG: %[[VAL_20:.*]] = bufferization.to_buffer %[[VAL_3]] : tensor to memref +// CHECK-DAG: %[[VAL_21:.*]] = bufferization.to_buffer %[[VAL_4]] : tensor to memref // CHECK-DAG: %[[VAL_22:.*]] = sparse_tensor.lvl %[[VAL_2]], %[[VAL_6]] : tensor to memref +// CHECK-DAG: %[[VAL_24:.*]] = bufferization.to_buffer %[[VAL_5]] : tensor to memref // CHECK: %[[VAL_25:.*]] = memref.load %[[VAL_21]][] : memref // CHECK: %[[VAL_26:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_6]]] : memref // CHECK: %[[VAL_27:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_3d.mlir b/mlir/test/Dialect/SparseTensor/sparse_3d.mlir index 9158ac427763..f6ecfa0beba2 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_3d.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_3d.mlir @@ -33,8 +33,8 @@ // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_11]] : memref<32x16x8xf32>) // CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_6]] to %[[VAL_3]] step %[[VAL_7]] { // CHECK: %[[VAL_14:.*]] = arith.muli %[[VAL_12]], %[[VAL_4]] : index @@ -75,8 +75,8 @@ func.func @add_ddd(%arga: tensor<32x16x8xf32, #Tddd>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_11]] : memref<32x16x8xf32>) // CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_6]] to %[[VAL_3]] step %[[VAL_7]] { // CHECK: %[[VAL_14:.*]] = arith.muli %[[VAL_12]], %[[VAL_4]] : index @@ -120,8 +120,8 @@ func.func @mul_ddd(%arga: tensor<32x16x8xf32, #Tddd>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to 
memref -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_15]] : memref<32x16x8xf32>) // CHECK: scf.for %[[VAL_16:.*]] = %[[VAL_7]] to %[[VAL_4]] step %[[VAL_9]] { // CHECK: %[[VAL_18:.*]] = arith.muli %[[VAL_16]], %[[VAL_5]] : index @@ -187,8 +187,8 @@ func.func @add_dds(%arga: tensor<32x16x8xf32, #Tdds>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_13]] : memref<32x16x8xf32>) // CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] { // CHECK: %[[VAL_16:.*]] = arith.muli %[[VAL_14]], %[[VAL_5]] : index @@ -234,8 +234,8 @@ func.func @mul_dds(%arga: tensor<32x16x8xf32, #Tdds>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_14]] : memref<32x16x8xf32>) // CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_7]] to %[[VAL_3]] step %[[VAL_8]] { // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_15]]] : memref @@ -305,8 +305,8 @@ func.func @add_dsd(%arga: tensor<32x16x8xf32, #Tdsd>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: 
%[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_12]] : memref<32x16x8xf32>) // CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] { // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_13]]] : memref @@ -354,8 +354,8 @@ func.func @mul_dsd(%arga: tensor<32x16x8xf32, #Tdsd>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_17]] : memref<32x16x8xf32>) // CHECK: scf.for %[[VAL_18:.*]] = %[[VAL_8]] to %[[VAL_4]] step %[[VAL_9]] { // CHECK: %[[VAL_19:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_18]]] : memref @@ -450,8 +450,8 @@ func.func @add_dss(%arga: tensor<32x16x8xf32, #Tdss>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_14]] : memref<32x16x8xf32>) // CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] { // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_15]]] : memref @@ -499,8 +499,8 @@ func.func @mul_dss(%arga: tensor<32x16x8xf32, #Tdss>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: 
%[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_14]] : memref<32x16x8xf32>) // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_8]]] : memref @@ -575,8 +575,8 @@ func.func @add_sdd(%arga: tensor<32x16x8xf32, #Tsdd>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_12]] : memref<32x16x8xf32>) // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref @@ -625,8 +625,8 @@ func.func @mul_sdd(%arga: tensor<32x16x8xf32, #Tsdd>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_17]] : memref<32x16x8xf32>) // CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_8]]] : memref // CHECK: %[[VAL_19:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_9]]] : memref @@ -726,8 +726,8 @@ func.func @add_sds(%arga: tensor<32x16x8xf32, #Tsds>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_12:.*]] = 
bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_14]] : memref<32x16x8xf32>) // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref @@ -778,8 +778,8 @@ func.func @mul_sds(%arga: tensor<32x16x8xf32, #Tsds>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_16]] : memref<32x16x8xf32>) // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref // CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_8]]] : memref @@ -883,8 +883,8 @@ func.func @add_ssd(%arga: tensor<32x16x8xf32, #Tssd>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_13]] : memref<32x16x8xf32>) // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -937,8 +937,8 @@ func.func @mul_ssd(%arga: tensor<32x16x8xf32, #Tssd>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_17:.*]] = 
bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_19:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_19:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_19]] : memref<32x16x8xf32>) // CHECK: %[[VAL_20:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_8]]] : memref // CHECK: %[[VAL_21:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_9]]] : memref @@ -1067,8 +1067,8 @@ func.func @add_sss(%arga: tensor<32x16x8xf32, #Tsss>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_15]] : memref<32x16x8xf32>) // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -1127,11 +1127,11 @@ func.func @mul_sss(%arga: tensor<32x16x8xf32, #Tsss>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 2 : index} : tensor to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.lvl %[[VAL_1]], %[[VAL_6]] : tensor -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_3]] : tensor to memref +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_3]] : tensor to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.lvl %[[VAL_1]], %[[VAL_5]] : tensor // CHECK-DAG: %[[VAL_14:.*]] = tensor.dim %[[VAL_2]], %[[VAL_6]] : tensor -// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_5]] to %[[VAL_13]] step %[[VAL_6]] { // CHECK: %[[VAL_19:.*]] = arith.muli %[[VAL_17]], %[[VAL_10]] : index // CHECK: scf.for %[[VAL_18:.*]] = %[[VAL_5]] to %[[VAL_10]] step %[[VAL_6]] { @@ -1191,7 +1191,7 @@ func.func @kernel_3d(%arga: tensor, // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<10x20x30xf32, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<10x20x30xf32, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20x30xf32, #sparse{{[0-9]*}}> -// CHECK-DAG: 
%[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_10]][] : memref // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref @@ -1246,10 +1246,10 @@ func.func @sum_reduction(%arga: tensor<10x20x30xf32, #Tsss>, %argx: tensor) // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = tensor.dim %[[VAL_0]], %[[VAL_3]] : tensor // CHECK-DAG: %[[VAL_7:.*]] = tensor.dim %[[VAL_0]], %[[VAL_4]] : tensor -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-DAG: %[[VAL_9:.*]] = tensor.dim %[[VAL_0]], %[[VAL_5]] : tensor // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_12]][] : memref // CHECK: %[[VAL_14:.*]] = scf.for %[[VAL_15:.*]] = %[[VAL_5]] to %[[VAL_9]] step %[[VAL_3]] iter_args(%[[VAL_16:.*]] = %[[VAL_13]]) -> (f32) { // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_15]]] : memref @@ -1305,9 +1305,9 @@ func.func @sum_reduction_inv(%arga: tensor, // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_8:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<20xf32> to memref<20xf32> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<30xf32> to memref<30xf32> -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_3]] : tensor<10x20x30xf32> to memref<10x20x30xf32> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<20xf32> to memref<20xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<30xf32> to memref<30xf32> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_3]] : tensor<10x20x30xf32> to memref<10x20x30xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_13]] : memref<10x20x30xf32>) // CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_7]] to %[[VAL_4]] step %[[VAL_8]] { // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_14]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_affine.mlir b/mlir/test/Dialect/SparseTensor/sparse_affine.mlir index e2dbadc4db5b..973f8f575ed7 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_affine.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_affine.mlir @@ -25,8 +25,8 @@ // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<4xf32> to memref<4xf32> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf32> to memref<32xf32> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<4xf32> to memref<4xf32> +// CHECK-DAG: 
%[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf32> to memref<32xf32> // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_4]]] : memref<4xf32> // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -112,8 +112,8 @@ func.func @mul_inv_enc_dense1d(%arga: tensor<32xf32, #EncDenseVec>, // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi32, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi32, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi32, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<34xi32> to memref<34xi32> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi32> to memref<32xi32> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<34xi32> to memref<34xi32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xi32> to memref<32xi32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : i32) outs(%[[VAL_11]] : memref<32xi32>) // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref @@ -163,8 +163,8 @@ func.func @and_affine_dense1d(%arga: tensor<32xi32, #SpVec>, // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<34x19xf64> to memref<34x19xf64> -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf64> to memref<32x16xf64> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<34x19xf64> to memref<34x19xf64> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf64> to memref<32x16xf64> // CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_3]] { // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_14]]] : memref // CHECK: %[[VAL_16:.*]] = arith.addi %[[VAL_14]], %[[VAL_3]] : index @@ -223,7 +223,7 @@ func.func @mul_affine_dense2d(%arga: tensor<32x16xf64, #CSR>, // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf64> to memref<32x16xf64> +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf64> to memref<32x16xf64> // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_5]]] : memref // CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_15]] to %[[VAL_16]] step %[[VAL_5]] { @@ -287,7 +287,7 @@ func.func @mul_affine_dense_dim_2d(%arga: tensor<34x16xf64, #CSR>, // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] 
{level = 0 : index} : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf64> to memref<32x16xf64> +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf64> to memref<32x16xf64> // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_5]]] : memref // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_6]]] : memref // CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_15]] to %[[VAL_16]] step %[[VAL_6]] { diff --git a/mlir/test/Dialect/SparseTensor/sparse_batch.mlir b/mlir/test/Dialect/SparseTensor/sparse_batch.mlir index cfddef743cf2..88e93be62a9e 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_batch.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_batch.mlir @@ -14,7 +14,7 @@ // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<8x4x2xf32, #sparse{{[0-9]*}}> to memref<8x?xindex> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<8x4x2xf32, #sparse{{[0-9]*}}> to memref<8x?xindex> // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x4x2xf32, #sparse{{[0-9]*}}> to memref<8x?xf32> -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_6]] : tensor<8x4x2xf32> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_6]] : tensor<8x4x2xf32> // CHECK-DAG: linalg.fill ins(%[[VAL_3]] : f32) outs(%[[VAL_10]] : memref<8x4x2xf32>) // CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_2]] to %[[VAL_5]] step %[[VAL_1]] { // CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_2]] to %[[VAL_4]] step %[[VAL_1]] { diff --git a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir index d1d8276f8dae..69c0d8c84abb 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir @@ -38,7 +38,7 @@ // CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf64> to memref<32xf64> // CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref // CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] { @@ -70,7 +70,7 @@ func.func @abs(%arga: tensor<32xf64, #SV>, // CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_buffer 
%[[VAL_1]] : tensor<32xf64> to memref<32xf64> // CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref // CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] { @@ -102,7 +102,7 @@ func.func @ceil(%arga: tensor<32xf64, #SV>, // CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf64> to memref<32xf64> // CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref // CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] { @@ -134,7 +134,7 @@ func.func @floor(%arga: tensor<32xf64, #SV>, // CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf64> to memref<32xf64> // CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref // CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] { @@ -169,8 +169,8 @@ func.func @neg(%arga: tensor<32xf64, #SV>, // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf64> to memref<32xf64> // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref // CHECK: %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) { @@ -229,8 +229,8 @@ func.func @add(%arga: tensor<32xf64, #SV>, // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref -// 
CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf64> to memref<32xf64> // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref // CHECK: %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) { @@ -289,8 +289,8 @@ func.func @sub(%arga: tensor<32xf64, #SV>, // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf64> to memref<32xf64> // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref // CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_4]] { @@ -325,7 +325,7 @@ func.func @mul(%arga: tensor<32xf64, #SV>, // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf64> to memref<32xf64> // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref // CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] { diff --git a/mlir/test/Dialect/SparseTensor/sparse_fusion.mlir b/mlir/test/Dialect/SparseTensor/sparse_fusion.mlir index d9f48afef481..352a0fa24230 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_fusion.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_fusion.mlir @@ -25,7 +25,7 @@ // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<100xf64, #sparse> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<100xf64, #sparse> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<100xf64, #sparse> to memref -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_8]] : +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_8]] : // CHECK-DAG: linalg.fill ins(%[[VAL_4]] : f64) outs(%[[VAL_12]] : memref<100xf64>) // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_3]]] : memref // CHECK-DAG: %[[VAL_14:.*]] = memref.load 
%[[VAL_9]]{{\[}}%[[VAL_2]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir b/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir index 3a33a200f827..be96dbf10242 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir @@ -33,8 +33,8 @@ // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xi64> to memref<32xi64> // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref // CHECK: %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) { @@ -94,8 +94,8 @@ func.func @add(%arga: tensor<32xi64, #SV>, // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64> -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xi64> to memref<32xi64> // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_6]]] : memref // CHECK: %[[VAL_15:.*]]:2 = scf.while (%[[VAL_16:.*]] = %[[VAL_13]], %[[VAL_17:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) { @@ -154,8 +154,8 @@ func.func @sub(%arga: tensor<32xi64, #SV>, // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xi64> to memref<32xi64> // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref // CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_4]] { @@ -190,7 +190,7 @@ func.func @mul(%arga: 
tensor<32xi64, #SV>, // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64> // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref // CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] { @@ -224,7 +224,7 @@ func.func @divsbyc(%arga: tensor<32xi64, #SV>, // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64> // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref // CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] { @@ -258,8 +258,8 @@ func.func @divubyc(%arga: tensor<32xi64, #SV>, // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xi64> to memref<32xi64> // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref // CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_4]] { @@ -296,8 +296,8 @@ func.func @and(%arga: tensor<32xi64, #SV>, // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xi64> to memref<32xi64> 
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref
// CHECK: %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) {
@@ -356,8 +356,8 @@ func.func @or(%arga: tensor<32xi64, #SV>,
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> to memref
-// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64>
-// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi64> to memref<32xi64>
+// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64>
+// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xi64> to memref<32xi64>
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref
// CHECK: %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) {
@@ -414,7 +414,7 @@ func.func @xor(%arga: tensor<32xi64, #SV>,
// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> to memref
-// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64>
+// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref
// CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref
// CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] {
@@ -448,7 +448,7 @@ func.func @ashrbyc(%arga: tensor<32xi64, #SV>,
// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> to memref
-// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64>
+// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref
// CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref
// CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] {
@@ -482,7 +482,7 @@ func.func @lsrbyc(%arga: tensor<32xi64, #SV>,
// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> to memref
-// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64>
+// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref
// CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref
// CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] {
diff --git a/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir b/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir
index d215ebb1c0c6..5f2aa5e3a273 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir
@@ -18,8 +18,8 @@
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse{{[0-9]*}}> to memref
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse{{[0-9]*}}> to memref
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32, #sparse{{[0-9]*}}> to memref
-// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<20x30xf32> to memref<20x30xf32>
-// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<10x30xf32> to memref<10x30xf32>
+// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<20x30xf32> to memref<20x30xf32>
+// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<10x30xf32> to memref<10x30xf32>
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref
// CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref
// CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_13]] to %[[VAL_14]] step %[[VAL_5]] {
@@ -58,13 +58,13 @@ func.func @matmul1(%a: tensor<10x20xf32, #DCSR>,
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 10 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<10x20xf32> to memref<10x20xf32>
+// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<10x20xf32> to memref<10x20xf32>
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index}
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index}
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index}
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index}
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]]
-// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<10x30xf32> to memref<10x30xf32>
+// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<10x30xf32> to memref<10x30xf32>
// CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
// CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref
// CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref
@@ -203,13 +203,13 @@ func.func @matmul2(%A: tensor<4x8xf64, #DCSR>,
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 6 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<8x8xi32> to memref<8x8xi32>
+// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<8x8xi32> to memref<8x8xi32>
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<3x3xi32, #sparse{{[0-9]*}}> to memref
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<3x3xi32, #sparse{{[0-9]*}}> to memref
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<3x3xi32, #sparse{{[0-9]*}}> to memref
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<3x3xi32, #sparse{{[0-9]*}}> to memref
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<3x3xi32, #sparse{{[0-9]*}}> to memref
-// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<6x6xi32> to memref<6x6xi32>
+// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<6x6xi32> to memref<6x6xi32>
// CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
// CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
// CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_13]], %[[VAL_14]]] : memref<6x6xi32>
@@ -255,13 +255,13 @@ func.func @conv2d(%input: tensor<8x8xi32>,
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 2 : i64
-// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<5x3xi8> to memref<5x3xi8>
+// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<5x3xi8> to memref<5x3xi8>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<3x6xi8, #sparse{{[0-9]*}}> to memref
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<3x6xi8, #sparse{{[0-9]*}}> to memref
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<3x6xi8, #sparse{{[0-9]*}}> to memref
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<3x6xi8, #sparse{{[0-9]*}}> to memref
// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<3x6xi8, #sparse{{[0-9]*}}> to memref
-// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<5x6xi64> to memref<5x6xi64>
+// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<5x6xi64> to memref<5x6xi64>
// CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
// CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref
// CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_5]]] : memref
@@ -309,7 +309,7 @@ func.func @quantized_matmul(%input1: tensor<5x3xi8>,
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<1024xf32, #sparse{{[0-9]*}}> to memref
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<1024xf32, #sparse{{[0-9]*}}> to memref
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<1024xf32, #sparse{{[0-9]*}}> to memref
-// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref
+// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_11]][] : memref
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref
// CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref
diff --git a/mlir/test/Dialect/SparseTensor/sparse_kernels_to_iterator.mlir
b/mlir/test/Dialect/SparseTensor/sparse_kernels_to_iterator.mlir index 836e26b51f7c..f6f7f396adab 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_kernels_to_iterator.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_kernels_to_iterator.mlir @@ -85,7 +85,7 @@ func.func @sqsum(%arg0: tensor) -> tensor { // CHECK: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK: %[[VAL_4:.*]] = arith.constant 0 : i32 // CHECK: %[[VAL_5:.*]] = arith.constant dense<0> : tensor<10xi32> -// CHECK: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_5]] : tensor<10xi32> to memref<10xi32> +// CHECK: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_5]] : tensor<10xi32> to memref<10xi32> // CHECK: linalg.fill ins(%[[VAL_4]] : i32) outs(%[[VAL_6]] : memref<10xi32>) // CHECK: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #sparse{{.*}}> to memref // CHECK: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #sparse{{.*}}> to memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower.mlir index cab57389f032..2866e115065d 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_lower.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_lower.mlir @@ -29,8 +29,8 @@ // CHECK-HIR-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> // CHECK-HIR-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> // CHECK-HIR-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse{{[0-9]*}}> -// CHECK-HIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<64xf64> to memref<64xf64> -// CHECK-HIR-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64> +// CHECK-HIR-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<64xf64> to memref<64xf64> +// CHECK-HIR-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf64> to memref<32xf64> // CHECK-HIR: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK-HIR-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref // CHECK-HIR-DAG: %[[VAL_14:.*]] = arith.addi %[[VAL_12]], %[[VAL_5]] : index @@ -60,8 +60,8 @@ // CHECK-MIR-DAG: %[[VAL_6:.*]] = call @sparsePositions0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref // CHECK-MIR-DAG: %[[VAL_7:.*]] = call @sparseCoordinates0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref // CHECK-MIR-DAG: %[[VAL_8:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr) -> memref -// CHECK-MIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<64xf64> to memref<64xf64> -// CHECK-MIR-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64> +// CHECK-MIR-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<64xf64> to memref<64xf64> +// CHECK-MIR-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf64> to memref<32xf64> // CHECK-MIR: scf.for %[[VAL_14:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK-MIR-DAG: %[[VAL_15:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_14]]] : memref // CHECK-MIR-DAG: %[[VAL_16:.*]] = arith.addi %[[VAL_14]], %[[VAL_5]] : index diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir index b998eeb0d394..17c3c29cf521 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir +++ 
b/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir @@ -32,8 +32,8 @@ // CHECK-HIR-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[DEMAP]] {level = 1 : index} // CHECK-HIR-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[DEMAP]] {level = 1 : index} // CHECK-HIR-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[DEMAP]] -// CHECK-HIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<64xf64> to memref<64xf64> -// CHECK-HIR-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64> +// CHECK-HIR-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<64xf64> to memref<64xf64> +// CHECK-HIR-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf64> to memref<32xf64> // CHECK-HIR: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK-HIR-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_12]]] : memref<64xf64> // CHECK-HIR-DAG: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref @@ -62,8 +62,8 @@ // CHECK-MIR-DAG: %[[VAL_7:.*]] = call @sparsePositions0(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr, index) -> memref // CHECK-MIR-DAG: %[[VAL_8:.*]] = call @sparseCoordinates0(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr, index) -> memref // CHECK-MIR-DAG: %[[VAL_9:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr) -> memref -// CHECK-MIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<64xf64> to memref<64xf64> -// CHECK-MIR-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64> +// CHECK-MIR-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<64xf64> to memref<64xf64> +// CHECK-MIR-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf64> to memref<32xf64> // CHECK-MIR: scf.for %[[VAL_15:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] { // CHECK-MIR: %[[VAL_16:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_15]]] : memref<64xf64> // CHECK-MIR: %[[VAL_17:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_15]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir index e1e1953779fa..f2a29a550ed0 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir @@ -29,8 +29,8 @@ // CHECK-HIR-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> // CHECK-HIR-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> // CHECK-HIR-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse{{[0-9]*}}> -// CHECK-HIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<64xf64> to memref<64xf64> -// CHECK-HIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64> +// CHECK-HIR-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<64xf64> to memref<64xf64> +// CHECK-HIR-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf64> to memref<32xf64> // CHECK-HIR: scf.for %[[VAL_11:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK-HIR-DAG: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref // CHECK-HIR-DAG: %[[VAL_13:.*]] = arith.addi %[[VAL_11]], %[[VAL_5]] : index @@ -60,8 +60,8 @@ // CHECK-MIR-DAG: %[[VAL_6:.*]] = call @sparsePositions0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref // CHECK-MIR-DAG: %[[VAL_7:.*]] = call 
@sparseCoordinates0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref // CHECK-MIR-DAG: %[[VAL_8:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr) -> memref -// CHECK-MIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<64xf64> to memref<64xf64> -// CHECK-MIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64> +// CHECK-MIR-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<64xf64> to memref<64xf64> +// CHECK-MIR-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf64> to memref<32xf64> // CHECK-MIR: scf.for %[[VAL_11:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK-MIR-DAG: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref // CHECK-MIR-DAG: %[[VAL_13:.*]] = arith.addi %[[VAL_11]], %[[VAL_5]] : index diff --git a/mlir/test/Dialect/SparseTensor/sparse_nd.mlir b/mlir/test/Dialect/SparseTensor/sparse_nd.mlir index b80a48363773..8f06df3c9b98 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_nd.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_nd.mlir @@ -35,13 +35,13 @@ // CHECK-DAG: %[[VAL_10:.*]] = arith.constant 80 : index // CHECK-DAG: %[[VAL_11:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_12:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<10x20x30x40x50x60x70x80xf32> to memref<10x20x30x40x50x60x70x80xf32> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<10x20x30x40x50x60x70x80xf32> to memref<10x20x30x40x50x60x70x80xf32> // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 3 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 3 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 4 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 4 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<80x70x60x50x40x30x20x10xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_20:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<10x20x30x40x50x60x70x80xf32> to memref<10x20x30x40x50x60x70x80xf32> +// CHECK-DAG: %[[VAL_20:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<10x20x30x40x50x60x70x80xf32> to memref<10x20x30x40x50x60x70x80xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_20]] : memref<10x20x30x40x50x60x70x80xf32> // CHECK: scf.for %[[VAL_21:.*]] = %[[VAL_11]] to %[[VAL_10]] step %[[VAL_12]] { // CHECK: %[[VAL_23:.*]] = arith.muli %[[VAL_21]], %[[VAL_9]] : index diff --git a/mlir/test/Dialect/SparseTensor/sparse_outbuf.mlir b/mlir/test/Dialect/SparseTensor/sparse_outbuf.mlir index ab7a30e2f96a..ebb5ab6075da 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_outbuf.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_outbuf.mlir @@ -19,7 +19,7 @@ // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #{{.*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #{{.*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10xi32, #{{.*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : 
tensor<10xf32> to memref<10xf32> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<10xf32> to memref<10xf32> // CHECK-DAG: linalg.fill ins(%[[VAL_3]] : f32) outs(%[[VAL_8]] : memref<10xf32>) // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref @@ -53,7 +53,7 @@ func.func @allout_inplace(%arga: tensor<10xi32, #SV>, // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #{{.*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #{{.*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10xi32, #{{.*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_4]] : tensor<10xf32> to memref<10xf32> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_4]] : tensor<10xf32> to memref<10xf32> // CHECK-DAG: linalg.fill ins(%[[VAL_2]] : f32) outs(%[[VAL_8]] : memref<10xf32>) // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_1]]] : memref // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref @@ -86,7 +86,7 @@ func.func @allout_materialize(%arga: tensor<10xi32, #SV>) -> tensor<10xf32> { // CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<10xf32, #{{.*}}> to memref // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10xf32, #{{.*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10xf32, #{{.*}}> to memref -// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<10xf32> to memref<10xf32> +// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<10xf32> to memref<10xf32> // CHECK-DAG: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-DAG: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref // CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] { diff --git a/mlir/test/Dialect/SparseTensor/sparse_pack.mlir b/mlir/test/Dialect/SparseTensor/sparse_pack.mlir index 91e3842bdd36..4546d3367b16 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_pack.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_pack.mlir @@ -12,12 +12,12 @@ // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 2 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 100 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<2xindex> to memref<2xindex> +// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<2xindex> to memref<2xindex> // CHECK-DAG: %[[VAL_7:.*]] = memref.cast %[[VAL_6]] : memref<2xindex> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<6x2xi32> to memref<6x2xi32> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<6x2xi32> to memref<6x2xi32> // CHECK-DAG: %[[VAL_9:.*]] = memref.collapse_shape %[[VAL_8]] {{\[\[}}0, 1]] : memref<6x2xi32> into memref<12xi32> // CHECK-DAG: %[[VAL_10:.*]] = memref.cast %[[VAL_9]] : memref<12xi32> to memref -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<6xf64> to memref<6xf64> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<6xf64> to memref<6xf64> // CHECK-DAG: %[[VAL_12:.*]] = memref.cast %[[VAL_11]] : memref<6xf64> to memref // CHECK: %[[VAL_13:.*]] = 
sparse_tensor.storage_specifier.init // CHECK: %[[VAL_14:.*]] = sparse_tensor.storage_specifier.set %[[VAL_13]] lvl_sz at 0 with %[[VAL_4]] @@ -45,18 +45,18 @@ func.func @sparse_pack(%values: tensor<6xf64>, %pos:tensor<2xindex>, %coordinate // CHECK-SAME: %[[VAL_5:.*]]: tensor<2xindex>, // CHECK-SAME: %[[VAL_6:.*]]: tensor<6x2xi32>) -> (tensor<6xf64>, tensor<2xindex>, tensor<6x2xi32>) { // CHECK: %[[VAL_7:.*]] = sparse_tensor.storage_specifier.get %[[VAL_3]] pos_mem_sz at 0 -// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_5]] : tensor<2xindex> to memref<2xindex> +// CHECK: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_5]] : tensor<2xindex> to memref<2xindex> // CHECK: %[[VAL_9:.*]] = memref.subview %[[VAL_8]][0] {{\[}}%[[VAL_7]]] [1] : memref<2xindex> to memref // CHECK: %[[VAL_10:.*]] = memref.subview %[[VAL_0]][0] {{\[}}%[[VAL_7]]] [1] : memref to memref // CHECK: memref.copy %[[VAL_10]], %[[VAL_9]] : memref to memref // CHECK: %[[VAL_11:.*]] = sparse_tensor.storage_specifier.get %[[VAL_3]] crd_mem_sz at 0 -// CHECK: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_6]] : tensor<6x2xi32> to memref<6x2xi32> +// CHECK: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_6]] : tensor<6x2xi32> to memref<6x2xi32> // CHECK: %[[VAL_13:.*]] = memref.collapse_shape %[[VAL_12]] {{\[\[}}0, 1]] : memref<6x2xi32> into memref<12xi32> // CHECK: %[[VAL_14:.*]] = memref.subview %[[VAL_13]][0] {{\[}}%[[VAL_11]]] [1] : memref<12xi32> to memref // CHECK: %[[VAL_15:.*]] = memref.subview %[[VAL_1]][0] {{\[}}%[[VAL_11]]] [1] : memref to memref // CHECK: memref.copy %[[VAL_15]], %[[VAL_14]] : memref to memref // CHECK: %[[VAL_16:.*]] = sparse_tensor.storage_specifier.get %[[VAL_3]] val_mem_sz -// CHECK: %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_4]] : tensor<6xf64> to memref<6xf64> +// CHECK: %[[VAL_17:.*]] = bufferization.to_buffer %[[VAL_4]] : tensor<6xf64> to memref<6xf64> // CHECK: %[[VAL_18:.*]] = memref.subview %[[VAL_17]][0] {{\[}}%[[VAL_16]]] [1] : memref<6xf64> to memref // CHECK: %[[VAL_19:.*]] = memref.subview %[[VAL_2]][0] {{\[}}%[[VAL_16]]] [1] : memref to memref // CHECK: memref.copy %[[VAL_19]], %[[VAL_18]] : memref to memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir b/mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir index c2cabd435111..1cfa8571a9f0 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir @@ -24,8 +24,8 @@ // CHECK-DAG: %[[TMP_0:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 1 : index} // CHECK-DAG: %[[TMP_1:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 1 : index} // CHECK-DAG: %[[TMP_2:.*]] = sparse_tensor.values %[[TMP_arg0]] -// CHECK-DAG: %[[TMP_3:.*]] = bufferization.to_memref %[[TMP_arg1]] : tensor<32xf32> to memref<32xf32> -// CHECK-DAG: %[[TMP_4:.*]] = bufferization.to_memref %[[TMP_arg2]] : tensor<16xf32> to memref<16xf32> +// CHECK-DAG: %[[TMP_3:.*]] = bufferization.to_buffer %[[TMP_arg1]] : tensor<32xf32> to memref<32xf32> +// CHECK-DAG: %[[TMP_4:.*]] = bufferization.to_buffer %[[TMP_arg2]] : tensor<16xf32> to memref<16xf32> // CHECK: scf.parallel (%[[TMP_arg3:.*]]) = (%[[TMP_c0]]) to (%[[TMP_c16]]) step (%[[TMP_c1]]) { // CHECK: %[[TMP_6:.*]] = memref.load %[[TMP_4]][%[[TMP_arg3]]] : memref<16xf32> // CHECK: %[[TMP_7:.*]] = memref.load %[[TMP_0]][%[[TMP_arg3]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_perm.mlir b/mlir/test/Dialect/SparseTensor/sparse_perm.mlir index 5f8002b5b6d3..289939fbfc16 100644 --- 
a/mlir/test/Dialect/SparseTensor/sparse_perm.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_perm.mlir @@ -24,7 +24,7 @@ // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index // CHECK: %[[DEMAP:.*]] = sparse_tensor.reinterpret_map %[[VAL_0]] // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[DEMAP]] : tensor<30x10x20xf32, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<20x30x10xf32> to memref<20x30x10xf32> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<20x30x10xf32> to memref<20x30x10xf32> // CHECK: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_9]] : memref<20x30x10xf32>) // CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] { // CHECK: %[[VAL_12:.*]] = arith.muli %[[VAL_10]], %[[VAL_4]] : index @@ -64,7 +64,7 @@ func.func @sparse_static_dims(%arga: tensor<10x20x30xf32, #X>, // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.lvl %[[DEMAP]], %[[VAL_2]] : tensor // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.lvl %[[DEMAP]], %[[VAL_3]] : tensor // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.lvl %[[DEMAP]], %[[VAL_4]] : tensor -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_10]] : memref) // CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_3]] to %[[VAL_7]] step %[[VAL_4]] { // CHECK: %[[VAL_13:.*]] = arith.muli %[[VAL_11]], %[[VAL_8]] : index diff --git a/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir b/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir index 93b5da41fc7f..4abaf03dff50 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir @@ -26,7 +26,7 @@ // CHECK-HIR-DAG: %[[VAL_6:.*]] = sparse_tensor.lvl %[[DEMAP]], %[[VAL_2]] : tensor // CHECK-HIR-DAG: %[[VAL_7:.*]] = sparse_tensor.lvl %[[DEMAP]], %[[VAL_4]] : tensor // CHECK-HIR-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[DEMAP]] : tensor -// CHECK-HIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK-HIR-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK-HIR: %[[VAL_11:.*]] = tensor.extract %[[VAL_1]][] : tensor // CHECK-HIR: %[[VAL_12:.*]] = scf.for %[[VAL_13:.*]] = %[[VAL_3]] to %[[VAL_5]] step %[[VAL_2]] iter_args(%[[VAL_14:.*]] = %[[VAL_11]]) -> (f32) { // CHECK-HIR: %[[VAL_18:.*]] = arith.muli %[[VAL_13]], %[[VAL_6]] : index @@ -58,7 +58,7 @@ // CHECK-MIR-DAG: %[[DimSize1:.*]] = call @sparseLvlSize(%[[ARGA]], %[[I1]]) // CHECK-MIR-DAG: %[[DimSize2:.*]] = call @sparseLvlSize(%[[ARGA]], %[[I2]]) // CHECK-MIR-DAG: %[[VAL_8:.*]] = call @sparseValuesF32(%[[ARGA]]) : (!llvm.ptr) -> memref -// CHECK-MIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[ARGX]] : tensor to memref +// CHECK-MIR-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[ARGX]] : tensor to memref // CHECK-MIR: %[[VAL_11:.*]] = tensor.extract %[[ARGX]][] : tensor // CHECK-MIR: %[[VAL_12:.*]] = scf.for %[[D2:.*]] = %[[I0]] to %[[DimSize0]] step %[[I1]] iter_args(%[[VAL_14:.*]] = %[[VAL_11]]) -> (f32) { // CHECK-MIR: %[[VAL_18:.*]] = arith.muli %[[D2]], %[[DimSize1]] : index diff --git a/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir b/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir index e5df646851d4..8d1f62f69f0f 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir @@ -33,8 +33,8 @@ // 
CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref -// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_4]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref +// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_buffer %[[VAL_4]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: %[[VAL_16:.*]] = memref.load %[[VAL_14]][] : memref // CHECK-DAG: %[[VAL_17:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_6]]] : memref // CHECK-DAG: %[[VAL_18:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir index e769534641ec..d653e144fb3b 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir @@ -64,14 +64,14 @@ func.func @fold_yield_direct_zero() -> tensor<32xf64> { // CHECK-DAG: %[[VAL_6:.*]] = arith.constant dense<0.000000e+00> : tensor<8x8xf64> // CHECK-DAG: %[[VAL_7:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) : tensor<8x8xf64> // CHECK-DAG: %[[VAL_8:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) : tensor<8x8xf64> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64> -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<8x8xf64> to memref<8x8xf64> // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_8]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_buffer %[[VAL_8]] : tensor<8x8xf64> to memref<8x8xf64> // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_5]]] : memref // CHECK: scf.for %[[VAL_19:.*]] = %[[VAL_17]] to %[[VAL_18]] step %[[VAL_5]] { @@ -132,8 +132,8 @@ func.func @sampled_dd_unfused(%args: tensor<8x8xf64, #SM>, // CHECK-DAG: %[[VAL_8:.*]] = arith.constant dense<0.000000e+00> : tensor<8x8xf64> // CHECK-DAG: %[[VAL_9:.*]] = bufferization.alloc_tensor() copy(%[[VAL_8]]) : tensor<8x8xf64> // CHECK-DAG: %[[VAL_10:.*]] = tensor.empty() : tensor<8x8xf64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64> -// 
CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<8x8xf64> to memref<8x8xf64> // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir b/mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir index 3cc0aa26c8bc..39962b46d5d5 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir @@ -30,8 +30,8 @@ // CHECK-DAG: %[[VAL_6:.*]] = arith.constant false // CHECK-DAG: %[[VAL_7:.*]] = arith.constant true // CHECK-DAG: %[[VAL_8:.*]] = tensor.empty() : tensor<8x8xf64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64> -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<8x8xf64> to memref<8x8xf64> // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir index c99d5d25f7b4..f4b565c7f9c8 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir @@ -31,7 +31,7 @@ // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 1 : index} : tensor<64x32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 1 : index} : tensor<64x32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<64x32xf64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_14]][] : memref // CHECK: %[[VAL_16:.*]] = scf.for %[[VAL_17:.*]] = %[[VAL_6]] to %[[VAL_5]] step %[[VAL_7]] iter_args(%[[VAL_18:.*]] = %[[VAL_15]]) -> (f64) { // CHECK: %[[VAL_19:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_17]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir index d88372276989..e9587edef467 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir @@ -28,7 +28,7 @@ // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : 
tensor<8xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8xi64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_7]] : tensor<8xi64> to memref<8xi64> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_7]] : tensor<8xi64> to memref<8xi64> // CHECK-DAG: linalg.fill ins(%[[VAL_4]] : i64) outs(%[[VAL_11]] : memref<8xi64>) // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_5]]] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_6]]] : memref @@ -70,7 +70,7 @@ func.func @sparse_index_1d_conj(%arga: tensor<8xi64, #SparseVector>) -> tensor<8 // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8xi64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_7]] : tensor<8xi64> to memref<8xi64> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_7]] : tensor<8xi64> to memref<8xi64> // CHECK-DAG: linalg.fill ins(%[[VAL_3]] : i64) outs(%[[VAL_11]] : memref<8xi64>) // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_5]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/spy_sddmm.mlir b/mlir/test/Dialect/SparseTensor/spy_sddmm.mlir index 6c3acf43f241..0c73d2fe8a07 100644 --- a/mlir/test/Dialect/SparseTensor/spy_sddmm.mlir +++ b/mlir/test/Dialect/SparseTensor/spy_sddmm.mlir @@ -24,8 +24,8 @@ // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 8 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<8x8xf64> to memref<8x8xf64> -// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 1 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 1 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref diff --git a/mlir/test/Dialect/SparseTensor/spy_sddmm_bsr.mlir b/mlir/test/Dialect/SparseTensor/spy_sddmm_bsr.mlir index df1e564c0623..a673b0dacf4a 100755 --- a/mlir/test/Dialect/SparseTensor/spy_sddmm_bsr.mlir +++ b/mlir/test/Dialect/SparseTensor/spy_sddmm_bsr.mlir @@ -37,8 +37,8 @@ // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0.000000e+00 : f32 // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.reinterpret_map %[[VAL_0]] : tensor to tensor // CHECK-DAG: %[[VAL_8:.*]] = tensor.dim %[[VAL_1]], %[[VAL_3]] : tensor -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref +// CHECK-DAG: %[[VAL_9:.*]] 
= bufferization.to_buffer %[[VAL_1]] : tensor to memref +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.lvl %[[VAL_7]], %[[VAL_4]] : tensor // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_7]] {level = 1 : index} : tensor to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_7]] {level = 1 : index} : tensor to memref diff --git a/mlir/test/Dialect/SparseTensor/unused-tensor.mlir b/mlir/test/Dialect/SparseTensor/unused-tensor.mlir index 7e8b9f83fac7..526c3f4f8830 100644 --- a/mlir/test/Dialect/SparseTensor/unused-tensor.mlir +++ b/mlir/test/Dialect/SparseTensor/unused-tensor.mlir @@ -28,8 +28,8 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 4 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<2x4xf64> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<2x4xf64> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<2x4xf64> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<2x4xf64> // CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] { // CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_6]] to %[[VAL_3]] step %[[VAL_7]] { // CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_6]] to %[[VAL_5]] step %[[VAL_7]] { diff --git a/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir b/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir index 15228c6a5f79..01b717090e87 100644 --- a/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir +++ b/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir @@ -16,7 +16,7 @@ // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-ON-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-ON-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -42,7 +42,7 @@ // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-OFF-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-OFF-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref @@ -93,7 +93,7 @@ func.func @sparse_reduction_ori(%argx: tensor, // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-ON-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-ON-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_0]] : 
tensor to memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -119,7 +119,7 @@ func.func @sparse_reduction_ori(%argx: tensor, // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-OFF-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-OFF-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref @@ -168,7 +168,7 @@ func.func @sparse_reduction_ori_accumulator_on_rhs(%argx: tensor, // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-ON-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-ON-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]]] : memref // CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -194,7 +194,7 @@ func.func @sparse_reduction_ori_accumulator_on_rhs(%argx: tensor, // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-OFF-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-OFF-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref @@ -243,7 +243,7 @@ func.func @sparse_reduction_subi(%argx: tensor, // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-ON-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-ON-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -269,7 +269,7 @@ func.func @sparse_reduction_subi(%argx: tensor, // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-OFF-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-OFF-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// 
CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref @@ -319,7 +319,7 @@ func.func @sparse_reduction_xor(%argx: tensor, // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-ON-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-ON-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -345,7 +345,7 @@ func.func @sparse_reduction_xor(%argx: tensor, // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-OFF-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-OFF-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref @@ -395,7 +395,7 @@ func.func @sparse_reduction_addi(%argx: tensor, // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-ON-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-ON-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -421,7 +421,7 @@ func.func @sparse_reduction_addi(%argx: tensor, // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-OFF-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-OFF-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref @@ -471,7 +471,7 @@ func.func @sparse_reduction_subf(%argx: tensor, // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-ON-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-ON-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref 
%[[VAL_0]] : tensor to memref +// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -497,7 +497,7 @@ func.func @sparse_reduction_subf(%argx: tensor, // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-OFF-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-OFF-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref diff --git a/mlir/test/Dialect/Tensor/bufferize.mlir b/mlir/test/Dialect/Tensor/bufferize.mlir index 9ea0a15f3118..4e13f8ea00a4 100644 --- a/mlir/test/Dialect/Tensor/bufferize.mlir +++ b/mlir/test/Dialect/Tensor/bufferize.mlir @@ -3,7 +3,7 @@ // CHECK-LABEL: func @dim( // CHECK-SAME: %[[TENSOR:.*]]: tensor<*xf32>, // CHECK-SAME: %[[INDEX:.*]]: index) -> index { -// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor<*xf32> to memref<*xf32> +// CHECK: %[[MEMREF:.*]] = bufferization.to_buffer %[[TENSOR]] : tensor<*xf32> to memref<*xf32> // CHECK: %[[EXTENT:.*]] = memref.dim %[[MEMREF]], %[[INDEX]] : memref<*xf32> // CHECK: return %[[EXTENT]] : index func.func @dim(%arg0: tensor<*xf32>, %arg1: index) -> index { @@ -15,7 +15,7 @@ func.func @dim(%arg0: tensor<*xf32>, %arg1: index) -> index { // CHECK-LABEL: func @rank( // CHECK-SAME: %[[TENSOR:.*]]: tensor<*xf32>) -> index { -// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] +// CHECK: %[[MEMREF:.*]] = bufferization.to_buffer %[[TENSOR]] // CHECK: %[[EXTENT:.*]] = memref.rank %[[MEMREF]] : memref<*xf32> func.func @rank(%arg0: tensor<*xf32>) -> index { %0 = tensor.rank %arg0 : tensor<*xf32> @@ -26,7 +26,7 @@ func.func @rank(%arg0: tensor<*xf32>) -> index { // CHECK-LABEL: func @tensor.cast( // CHECK-SAME: %[[TENSOR:.*]]: tensor) -> tensor<2xindex> { -// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] +// CHECK: %[[MEMREF:.*]] = bufferization.to_buffer %[[TENSOR]] // CHECK: %[[CASTED:.*]] = memref.cast %[[MEMREF]] : memref to memref<2xindex> // CHECK: %[[RET:.*]] = bufferization.to_tensor %[[CASTED]] // CHECK: return %[[RET]] : tensor<2xindex> @@ -39,7 +39,7 @@ func.func @tensor.cast(%arg0: tensor) -> tensor<2xindex> { // CHECK-LABEL: func @tensor.cast_from_unranked( // CHECK-SAME: %[[TENSOR:.*]]: tensor<*xf32>) -> tensor<2xf32> { -// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor<*xf32> to memref<*xf32> +// CHECK: %[[MEMREF:.*]] = bufferization.to_buffer %[[TENSOR]] : tensor<*xf32> to memref<*xf32> // CHECK: %[[CASTED_MEMREF:.*]] = memref.cast %[[MEMREF]] : memref<*xf32> to memref<2xf32, strided<[?], offset: ?>> // CHECK: %[[RET:.*]] = bufferization.to_tensor %[[CASTED_MEMREF]] : memref<2xf32, strided<[?], offset: ?>> // CHECK: return %[[RET]] : tensor<2xf32> @@ -52,7 +52,7 @@ func.func @tensor.cast_from_unranked(%arg0: tensor<*xf32>) -> tensor<2xf32> { // CHECK-LABEL: func @tensor.cast_to_unranked( // CHECK-SAME: %[[TENSOR:.*]]: tensor<2xf32>) 
-> tensor<*xf32> { -// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor<2xf32> to memref<2xf32> +// CHECK: %[[MEMREF:.*]] = bufferization.to_buffer %[[TENSOR]] : tensor<2xf32> to memref<2xf32> // CHECK: %[[CASTED_MEMREF:.*]] = memref.cast %[[MEMREF]] : memref<2xf32> to memref<*xf32> // CHECK: %[[RET:.*]] = bufferization.to_tensor %[[CASTED_MEMREF]] : memref<*xf32> // CHECK: return %[[RET]] : tensor<*xf32> @@ -77,7 +77,7 @@ func.func @tensor.empty() -> tensor<5xf32> { // CHECK-LABEL: func @tensor.extract( // CHECK-SAME: %[[TENSOR:.*]]: tensor, // CHECK-SAME: %[[IDX:.*]]: index) -> f32 { -// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor to memref +// CHECK: %[[MEMREF:.*]] = bufferization.to_buffer %[[TENSOR]] : tensor to memref // CHECK: %[[RET:.*]] = memref.load %[[MEMREF]][%[[IDX]]] : memref // CHECK: return %[[RET]] : f32 // CHECK: } @@ -199,7 +199,7 @@ func.func @tensor.from_elements_3d(%f0 : f32) -> tensor<3x2x2xf32> { // CHECK-LABEL: func @tensor.generate( // CHECK-SAME: %[[ARG:.*]]: tensor<*xf32>, // CHECK-SAME: %[[DYNAMIC_EXTENT:.*]]: index) -> tensor { -// CHECK-DAG: %[[ARG_M:.*]] = bufferization.to_memref %[[ARG]] : tensor<*xf32> to memref<*xf32> +// CHECK-DAG: %[[ARG_M:.*]] = bufferization.to_buffer %[[ARG]] : tensor<*xf32> to memref<*xf32> // CHECK-DAG: %[[ALLOC:.*]] = memref.alloc(%[[DYNAMIC_EXTENT]]) {{.*}} : memref // CHECK: %[[ALLOC_T:.*]] = bufferization.to_tensor %[[ALLOC]] // CHECK: %[[MAPPED:.*]] = linalg.map @@ -266,7 +266,7 @@ func.func @tensor.generate_unknown_ops_in_body(%arg0: index) -> tensor // CHECK-SAME: %[[t1:.*]]: tensor, %[[idx1:.*]]: index, %[[idx2:.*]]: index func.func @tensor.extract_slice( %t1: tensor, %idx1: index, %idx2: index) -> tensor { - // CHECK: %[[m:.*]] = bufferization.to_memref %[[t1]] : tensor to memref + // CHECK: %[[m:.*]] = bufferization.to_buffer %[[t1]] : tensor to memref // CHECK: %[[r:.*]] = memref.subview %[[m]][5, %[[idx2]]] [%[[idx1]], 10] [1, 1] : memref to memref> %0 = tensor.extract_slice %t1[5, %idx2][%idx1, 10][1, 1] : tensor to tensor @@ -282,7 +282,7 @@ func.func @tensor.extract_slice( // CHECK-SAME: %[[idx2:.*]]: index func.func @tensor.extract_slice_rank_reducing( %t1: tensor, %idx1: index, %idx2: index) -> tensor { - // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor to memref + // CHECK: %[[m1:.*]] = bufferization.to_buffer %[[t1]] : tensor to memref // CHECK: %[[r:.*]] = memref.subview %[[m1]][5, %[[idx1]], 10] [%[[idx2]], 1, 15] [1, 1, 1] : memref to memref> %0 = tensor.extract_slice %t1[5, %idx1, 10][%idx2, 1, 15][1, 1, 1] : tensor to tensor @@ -300,8 +300,8 @@ func.func @tensor.insert_slice(%t1: tensor, %t2: tensor, %idx1: index, %idx2: index) -> tensor { // CHECK-DAG: %[[c0:.*]] = arith.constant 0 : index // CHECK-DAG: %[[c1:.*]] = arith.constant 1 : index - // CHECK-DAG: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor to memref - // CHECK-DAG: %[[m2:.*]] = bufferization.to_memref %[[t2]] : tensor to memref + // CHECK-DAG: %[[m1:.*]] = bufferization.to_buffer %[[t1]] : tensor to memref + // CHECK-DAG: %[[m2:.*]] = bufferization.to_buffer %[[t2]] : tensor to memref // CHECK-DAG: %[[dim0:.*]] = memref.dim %[[m1]], %[[c0]] // CHECK-DAG: %[[dim1:.*]] = memref.dim %[[m1]], %[[c1]] // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim0]], %[[dim1]]) @@ -353,7 +353,7 @@ func.func @tensor.insert_slice_rank_reducing_2( // CHECK-SAME: %[[f:.*]]: f32 func.func @tensor.insert(%t1: tensor<5xf32>, %idx1: index, %f: f32) -> tensor<5xf32> { // CHECK-DAG: %[[alloc:.*]] = 
memref.alloc() {{.*}} : memref<5xf32> - // CHECK-DAG: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor<5xf32> to memref<5xf32> + // CHECK-DAG: %[[m1:.*]] = bufferization.to_buffer %[[t1]] : tensor<5xf32> to memref<5xf32> // CHECK: memref.copy %[[m1]], %[[alloc]] // CHECK: memref.store %[[f]], %[[alloc]][%[[idx1]]] %0 = tensor.insert %f into %t1[%idx1] : tensor<5xf32> @@ -368,7 +368,7 @@ func.func @tensor.insert(%t1: tensor<5xf32>, %idx1: index, %f: f32) -> tensor<5x // CHECK-LABEL: func @tensor.expand_shape( // CHECK-SAME: %[[t1:.*]]: tensor func.func @tensor.expand_shape(%t1: tensor, %sz0: index) -> tensor<2x?x10xf32> { - // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] + // CHECK: %[[m1:.*]] = bufferization.to_buffer %[[t1]] // CHECK: %[[C0:.*]] = arith.constant 0 : index // CHECK: %[[DIM:.*]] = memref.dim %[[m1]], %[[C0]] : memref // CHECK: %[[C2:.*]] = arith.constant 2 : index @@ -388,7 +388,7 @@ func.func @tensor.expand_shape(%t1: tensor, %sz0: index) -> tensor<2x? // CHECK-SAME: %[[t1:.*]]: tensor func.func @tensor.expand_shape_of_slice( %t1: tensor, %o1: index, %s1: index, %sz0: index) -> tensor { - // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : + // CHECK: %[[m1:.*]] = bufferization.to_buffer %[[t1]] : // CHECK: %[[subview:.*]] = memref.subview %[[m1]][%{{.*}}, 5] [%{{.*}}, 10] [1, 1] : memref to memref> %0 = tensor.extract_slice %t1[%o1, 5][%s1, 10][1, 1] : tensor to tensor @@ -408,7 +408,7 @@ func.func @tensor.expand_shape_of_slice( // CHECK-SAME: %[[t1:.*]]: tensor func.func @tensor.expand_shape_of_scalar_slice( %t1: tensor, %o1: index, %s1: index) -> tensor<1xf32> { - // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor to memref + // CHECK: %[[m1:.*]] = bufferization.to_buffer %[[t1]] : tensor to memref // CHECK: %[[subview:.*]] = memref.subview %[[m1]][%{{.*}}] [1] [1] : memref to memref> %0 = tensor.extract_slice %t1[%o1][1][1] : tensor to tensor // CHECK: %[[expanded:.*]] = memref.expand_shape %[[subview]] [] output_shape [1] : memref into memref<1xf32, strided<[1], offset: ?>> @@ -423,7 +423,7 @@ func.func @tensor.expand_shape_of_scalar_slice( // CHECK-LABEL: func @tensor.collapse_shape( // CHECK-SAME: %[[t1:.*]]: tensor<2x?x?xf32> func.func @tensor.collapse_shape(%t1: tensor<2x?x?xf32>) -> tensor { - // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor<2x?x?xf32> to memref<2x?x?xf32> + // CHECK: %[[m1:.*]] = bufferization.to_buffer %[[t1]] : tensor<2x?x?xf32> to memref<2x?x?xf32> // CHECK: %[[collapsed:.*]] = memref.collapse_shape %[[m1]] [ // CHECK-SAME: [0, 1], [2]] : memref<2x?x?xf32> into memref %0 = tensor.collapse_shape %t1 [[0, 1], [2]] @@ -439,7 +439,7 @@ func.func @tensor.collapse_shape(%t1: tensor<2x?x?xf32>) -> tensor { // CHECK-LABEL: func @tensor.collapse_shape_to_scalar( // CHECK-SAME: %[[t1:.*]]: tensor<1x1x1xf32> func.func @tensor.collapse_shape_to_scalar(%t1: tensor<1x1x1xf32>) -> tensor { - // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor<1x1x1xf32> to memref<1x1x1xf32> + // CHECK: %[[m1:.*]] = bufferization.to_buffer %[[t1]] : tensor<1x1x1xf32> to memref<1x1x1xf32> // CHECK: %[[collapsed:.*]] = memref.collapse_shape %[[m1]] [] : memref<1x1x1xf32> into memref %0 = tensor.collapse_shape %t1 [] : tensor<1x1x1xf32> into tensor @@ -528,7 +528,7 @@ func.func @tensor.collapse_shape_of_slice5(%arg0: tensor<2x2x2xi64>) -> tensor<4 // CHECK-LABEL: func @tensor.reshape( // CHECK-SAME: %[[t1:.*]]: tensor func.func @tensor.reshape(%t1: tensor) -> tensor<2x2x5xf32> { - // CHECK: %[[m1:.*]] = 
bufferization.to_memref %[[t1]] : tensor to memref + // CHECK: %[[m1:.*]] = bufferization.to_buffer %[[t1]] : tensor to memref // CHECK: %[[two:.*]] = arith.constant 2 : i64 %two = arith.constant 2 : i64 @@ -560,7 +560,7 @@ func.func @tensor.reshape(%t1: tensor) -> tensor<2x2x5xf32> { // CHECK-SAME: %[[t1:.*]]: tensor, %[[l2:.*]]: index, %[[h1:.*]]: index, %[[h2:.*]]: index func.func @tensor.pad(%t1: tensor, %l2: index, %h1: index, %h2: index) -> tensor { - // CHECK-DAG: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor to memref + // CHECK-DAG: %[[m1:.*]] = bufferization.to_buffer %[[t1]] : tensor to memref // CHECK-DAG: %[[c0:.*]] = arith.constant 0 : index // CHECK-DAG: %[[c1:.*]] = arith.constant 1 : index // CHECK-DAG: %[[dim0:.*]] = memref.dim %[[m1]], %[[c0]] @@ -576,7 +576,7 @@ func.func @tensor.pad(%t1: tensor, %l2: index, %h1: index, // CHECK: %[[mul:.*]] = arith.muli %[[index0]], %[[index1]] // CHECK: linalg.yield %[[mul]] // CHECK: } - // CHECK: %[[mapped_m:.*]] = bufferization.to_memref %[[mapped]] + // CHECK: %[[mapped_m:.*]] = bufferization.to_buffer %[[mapped]] // CHECK: %[[subview:.*]] = memref.subview %[[mapped_m]][5, %[[l2]]] [%[[dim0]], 10] [1, 1] // CHECK: memref.copy %[[m1]], %[[subview]] %0 = tensor.pad %t1 low[5, %l2] high[%h1, %h2] { diff --git a/mlir/test/Dialect/Vector/bufferize.mlir b/mlir/test/Dialect/Vector/bufferize.mlir index c2abebe706ac..887fb941cc65 100644 --- a/mlir/test/Dialect/Vector/bufferize.mlir +++ b/mlir/test/Dialect/Vector/bufferize.mlir @@ -2,7 +2,7 @@ // CHECK-LABEL: func @transfer_read( // CHECK-SAME: %[[t:.*]]: tensor, %[[o1:.*]]: index, %[[o2:.*]]: index, %[[pad:.*]]: f32) -// CHECK: %[[m:.*]] = bufferization.to_memref %[[t]] : tensor to memref +// CHECK: %[[m:.*]] = bufferization.to_buffer %[[t]] : tensor to memref // CHECK: %[[r:.*]] = vector.transfer_read %[[m]][%[[o1]], %[[o2]]], %[[pad]] {in_bounds = [true, false]} : memref, vector<5x6xf32> // CHECK: return %[[r]] func.func @transfer_read(%t: tensor, %o1: index, @@ -16,7 +16,7 @@ func.func @transfer_read(%t: tensor, %o1: index, // CHECK-LABEL: func @transfer_write( // CHECK-SAME: %[[t:.*]]: tensor, %[[o1:.*]]: index, %[[o2:.*]]: index, %[[vec:.*]]: vector<5x6xf32>, %[[mask:.*]]: vector<5x6xi1>) -// CHECK: %[[m:.*]] = bufferization.to_memref %[[t]] : tensor to memref +// CHECK: %[[m:.*]] = bufferization.to_buffer %[[t]] : tensor to memref // CHECK: %[[alloc:.*]] = memref.alloc(%{{.*}}, %{{.*}}) {{.*}} : memref // CHECK: memref.copy %[[m]], %[[alloc]] // CHECK: vector.transfer_write %[[vec]], %[[alloc]][%[[o1]], %[[o2]]], %[[mask]] {in_bounds = [true, false]} : vector<5x6xf32>, memref @@ -35,7 +35,7 @@ func.func @transfer_write(%t: tensor, %o1: index, // CHECK-LABEL: func @gather( // CHECK-SAME: %[[base:.*]]: tensor, %[[v:.*]]: vector<16xi32>, // CHECK-SAME: %[[mask:.*]]: vector<16xi1>, %[[pass_thru:.*]]: vector<16xf32>) -// CHECK: %[[m:.*]] = bufferization.to_memref %[[base]] : tensor to memref +// CHECK: %[[m:.*]] = bufferization.to_buffer %[[base]] : tensor to memref // CHECK: %[[c0:.*]] = arith.constant 0 : index // CHECK: %[[out:.*]] = vector.gather %[[m]][%[[c0]], %[[c0]]] [%[[v]]], %[[mask]], %[[pass_thru]] : memref, vector<16xi32>, vector<16xi1>, vector<16xf32> into vector<16xf32> func.func @gather(%base: tensor, %v: vector<16xi32>, %mask: vector<16xi1>, %pass_thru: vector<16xf32>) -> vector<16xf32> { diff --git a/mlir/test/Dialect/XeGPU/XeGPUOps.mlir b/mlir/test/Dialect/XeGPU/XeGPUOps.mlir index 1d0f871f06b4..d7174a489888 100644 --- a/mlir/test/Dialect/XeGPU/XeGPUOps.mlir +++ 
b/mlir/test/Dialect/XeGPU/XeGPUOps.mlir @@ -1,4 +1,3 @@ -// REQUIRES: xegpu-dialect-enabled // RUN: mlir-opt %s | FileCheck %s // Verify the printed output can be parsed. // RUN: mlir-opt %s | mlir-opt | FileCheck %s diff --git a/mlir/test/Dialect/XeGPU/invalid.mlir b/mlir/test/Dialect/XeGPU/invalid.mlir index 4255538b9ba1..7816bff0582f 100644 --- a/mlir/test/Dialect/XeGPU/invalid.mlir +++ b/mlir/test/Dialect/XeGPU/invalid.mlir @@ -1,4 +1,3 @@ -// REQUIRES: xegpu-dialect-enabled // RUN: mlir-opt %s -split-input-file -verify-diagnostics // ----- diff --git a/mlir/test/Dialect/XeGPU/xegpu-fold-alias-ops.mlir b/mlir/test/Dialect/XeGPU/xegpu-fold-alias-ops.mlir index 58dbefa0fdaa..d32954127fce 100644 --- a/mlir/test/Dialect/XeGPU/xegpu-fold-alias-ops.mlir +++ b/mlir/test/Dialect/XeGPU/xegpu-fold-alias-ops.mlir @@ -1,4 +1,3 @@ -// REQUIRES: xegpu-dialect-enabled // RUN: mlir-opt -xegpu-fold-alias-ops -split-input-file %s | FileCheck %s func.func @fold_subview_with_xegpu_create_nd_tdesc(%arg0 : memref<256x256xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index) ->(!xegpu.tensor_desc<8x16xf32>) { diff --git a/mlir/test/IR/invalid-builtin-types.mlir b/mlir/test/IR/invalid-builtin-types.mlir index 441f036a81ce..51612446d2e6 100644 --- a/mlir/test/IR/invalid-builtin-types.mlir +++ b/mlir/test/IR/invalid-builtin-types.mlir @@ -115,17 +115,17 @@ func.func @illegaltype(i21312312323120) // expected-error {{invalid integer widt // ----- // Test no nested vector. -// expected-error@+1 {{vector elements must be int/index/float type}} +// expected-error@+1 {{failed to verify 'elementType': integer or index or floating-point}} func.func @vectors(vector<1 x vector<1xi32>>, vector<2x4xf32>) // ----- -// expected-error @+1 {{vector types must have positive constant sizes}} +// expected-error @+1 {{vector types must have positive constant sizes but got 0}} func.func @zero_vector_type() -> vector<0xi32> // ----- -// expected-error @+1 {{vector types must have positive constant sizes}} +// expected-error @+1 {{vector types must have positive constant sizes but got 1, 0}} func.func @zero_in_vector_type() -> vector<1x0xi32> // ----- diff --git a/mlir/test/Integration/Dialect/Tosa/CPU/test-maxpool-dynamic.mlir b/mlir/test/Integration/Dialect/Tosa/CPU/test-maxpool-dynamic.mlir index 3c508fbb67a1..06bc0e7ef44e 100644 --- a/mlir/test/Integration/Dialect/Tosa/CPU/test-maxpool-dynamic.mlir +++ b/mlir/test/Integration/Dialect/Tosa/CPU/test-maxpool-dynamic.mlir @@ -54,7 +54,7 @@ func.func @main() { %result_static = func.call @max_pool_static(%A) : (!tensor_type) -> !tensor_type %result_dynamic = func.call @max_pool_dynamic(%A_dynamic) : (tensor) -> tensor - %static_buffer = bufferization.to_memref %result_static : !tensor_type to !memref_type + %static_buffer = bufferization.to_buffer %result_static : !tensor_type to !memref_type %unranked_static_buffer = memref.cast %static_buffer : !memref_type to memref<*xf32> // CHECK: Unranked Memref base@ = {{.*}} rank = 4 offset = 0 sizes = [1, 4, 4, 1] strides = [16, 4, 1, 1] data = @@ -81,7 +81,7 @@ func.func @main() { func.call @printMemrefF32(%unranked_static_buffer) : (memref<*xf32>) -> () - %dynamic_buffer = bufferization.to_memref %result_dynamic : tensor to memref + %dynamic_buffer = bufferization.to_buffer %result_dynamic : tensor to memref %unranked_dynamic_buffer = memref.cast %dynamic_buffer : memref to memref<*xf32> // CHECK: Unranked Memref base@ = {{.*}} rank = 4 offset = 0 sizes = [1, 4, 4, 1] strides = [16, 4, 1, 1] data = diff --git 
a/mlir/test/Integration/Dialect/Vector/CPU/AMX/mulf-full.mlir b/mlir/test/Integration/Dialect/Vector/CPU/AMX/mulf-full.mlir index 8cf15cd69786..8014bb7d2dcc 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/AMX/mulf-full.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/AMX/mulf-full.mlir @@ -100,8 +100,8 @@ func.func @entry() -> i32 { ]> : tensor<16x32xbf16> // Set up memory. - %a = bufferization.to_memref %0 : tensor<16x32xbf16> to memref<16x32xbf16> - %b = bufferization.to_memref %1 : tensor<16x32xbf16> to memref<16x32xbf16> + %a = bufferization.to_buffer %0 : tensor<16x32xbf16> to memref<16x32xbf16> + %b = bufferization.to_buffer %1 : tensor<16x32xbf16> to memref<16x32xbf16> %c = memref.alloc() : memref<16x16xf32> // Call kernel. diff --git a/mlir/test/Integration/Dialect/Vector/CPU/AMX/muli-full.mlir b/mlir/test/Integration/Dialect/Vector/CPU/AMX/muli-full.mlir index 652ba0698c4c..a0076db6660d 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/AMX/muli-full.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/AMX/muli-full.mlir @@ -100,8 +100,8 @@ func.func @entry() -> i32 { ]> : tensor<16x64xi8> // Set up memory. - %a = bufferization.to_memref %0 : tensor<16x64xi8> to memref<16x64xi8> - %b = bufferization.to_memref %1 : tensor<16x64xi8> to memref<16x64xi8> + %a = bufferization.to_buffer %0 : tensor<16x64xi8> to memref<16x64xi8> + %b = bufferization.to_buffer %1 : tensor<16x64xi8> to memref<16x64xi8> %c = memref.alloc() : memref<16x16xi32> // Call kernel. diff --git a/mlir/test/lib/Dialect/Bufferization/CMakeLists.txt b/mlir/test/lib/Dialect/Bufferization/CMakeLists.txt index c14a9f2cc9bb..226e0bb97732 100644 --- a/mlir/test/lib/Dialect/Bufferization/CMakeLists.txt +++ b/mlir/test/lib/Dialect/Bufferization/CMakeLists.txt @@ -1,6 +1,7 @@ # Exclude tests from libMLIR.so add_mlir_library(MLIRBufferizationTestPasses TestTensorCopyInsertion.cpp + TestTensorLikeAndBufferLike.cpp EXCLUDE_FROM_LIBMLIR ) @@ -9,4 +10,11 @@ mlir_target_link_libraries(MLIRBufferizationTestPasses PUBLIC MLIRBufferizationTransforms MLIRIR MLIRPass + MLIRTestDialect ) + +target_include_directories(MLIRBufferizationTestPasses + PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR}/../../Dialect/Test + ${CMAKE_CURRENT_BINARY_DIR}/../../Dialect/Test + ) diff --git a/mlir/test/lib/Dialect/Bufferization/TestTensorLikeAndBufferLike.cpp b/mlir/test/lib/Dialect/Bufferization/TestTensorLikeAndBufferLike.cpp new file mode 100644 index 000000000000..60e60849f3e6 --- /dev/null +++ b/mlir/test/lib/Dialect/Bufferization/TestTensorLikeAndBufferLike.cpp @@ -0,0 +1,99 @@ +//===- TestTensorLikeAndBufferLike.cpp - Bufferization Test -----*- c++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "TestDialect.h"
+#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
+#include "mlir/Dialect/Bufferization/IR/BufferizationTypeInterfaces.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/IR/Attributes.h"
+#include "mlir/IR/BuiltinAttributes.h"
+#include "mlir/Pass/Pass.h"
+
+#include <string>
+
+using namespace mlir;
+
+namespace {
+std::string getImplementationStatus(Type type) {
+  if (isa<bufferization::TensorLikeType>(type)) {
+    return "is_tensor_like";
+  }
+  if (isa<bufferization::BufferLikeType>(type)) {
+    return "is_buffer_like";
+  }
+  return {};
+}
+
+DictionaryAttr findAllImplementeesOfTensorOrBufferLike(func::FuncOp funcOp) {
+  llvm::SmallVector<NamedAttribute> attributes;
+
+  const auto funcType = funcOp.getFunctionType();
+  for (auto [index, inputType] : llvm::enumerate(funcType.getInputs())) {
+    const auto status = getImplementationStatus(inputType);
+    if (status.empty()) {
+      continue;
+    }
+
+    attributes.push_back(
+        NamedAttribute(StringAttr::get(funcOp.getContext(),
+                                       "operand_" + std::to_string(index)),
+                       StringAttr::get(funcOp.getContext(), status)));
+  }
+
+  for (auto [index, resultType] : llvm::enumerate(funcType.getResults())) {
+    const auto status = getImplementationStatus(resultType);
+    if (status.empty()) {
+      continue;
+    }
+
+    attributes.push_back(NamedAttribute(
+        StringAttr::get(funcOp.getContext(), "result_" + std::to_string(index)),
+        StringAttr::get(funcOp.getContext(), status)));
+  }
+
+  return mlir::DictionaryAttr::get(funcOp.getContext(), attributes);
+}
+
+/// This pass tests whether specified types implement the TensorLike and/or
+/// BufferLike type interfaces defined in bufferization.
+///
+/// The pass analyses the operation's signature. When an implementation of one
+/// of these interfaces is found, an attribute is added to the operation,
+/// signifying the associated operand / result.
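+///
+/// Illustrative sketch (the concrete IR below is an assumption made for
+/// documentation, reusing the test dialect types added elsewhere in this
+/// patch): running
+///   mlir-opt -test-tensorlike-bufferlike input.mlir
+/// on a function such as
+///   func.func @f(%arg0: !test.test_tensor<[16], f32>)
+///       -> !test.test_memref<[16], f32>
+/// is expected to annotate it roughly as
+///   func.func @f(...) -> ... attributes
+///       {found = {operand_0 = "is_tensor_like", result_0 = "is_buffer_like"}}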
+struct TestTensorLikeAndBufferLikePass + : public PassWrapper> { + MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestTensorLikeAndBufferLikePass) + + void getDependentDialects(DialectRegistry ®istry) const override { + registry.insert(); + } + StringRef getArgument() const final { return "test-tensorlike-bufferlike"; } + StringRef getDescription() const final { + return "Module pass to test custom types that implement TensorLike / " + "BufferLike interfaces"; + } + + void runOnOperation() override { + auto op = getOperation(); + + op.walk([](func::FuncOp funcOp) { + const auto dict = findAllImplementeesOfTensorOrBufferLike(funcOp); + if (!dict.empty()) { + funcOp->setAttr("found", dict); + } + }); + } +}; +} // namespace + +namespace mlir::test { +void registerTestTensorLikeAndBufferLikePass() { + PassRegistration(); +} +} // namespace mlir::test diff --git a/mlir/test/lib/Dialect/Test/CMakeLists.txt b/mlir/test/lib/Dialect/Test/CMakeLists.txt index 618b13da9899..ba5bbf4997d5 100644 --- a/mlir/test/lib/Dialect/Test/CMakeLists.txt +++ b/mlir/test/lib/Dialect/Test/CMakeLists.txt @@ -92,6 +92,7 @@ mlir_target_link_libraries(MLIRTestDialect PUBLIC MLIRTransformUtils MLIRTransforms MLIRValueBoundsOpInterface + MLIRBufferizationDialect ) add_mlir_translation_library(MLIRTestFromLLVMIRTranslation diff --git a/mlir/test/lib/Dialect/Test/TestOpDefs.cpp b/mlir/test/lib/Dialect/Test/TestOpDefs.cpp index f6b8a0005f28..29d7bed1cfa6 100644 --- a/mlir/test/lib/Dialect/Test/TestOpDefs.cpp +++ b/mlir/test/lib/Dialect/Test/TestOpDefs.cpp @@ -8,6 +8,7 @@ #include "TestDialect.h" #include "TestOps.h" +#include "mlir/Dialect/Bufferization/IR/Bufferization.h" #include "mlir/Dialect/Tensor/IR/Tensor.h" #include "mlir/IR/Verifier.h" #include "mlir/Interfaces/FunctionImplementation.h" @@ -1378,3 +1379,55 @@ TestMultiSlotAlloca::handleDestructuringComplete( const DestructurableMemorySlot &slot, OpBuilder &builder) { return createNewMultiAllocaWithoutSlot(slot, builder, *this); } + +::mlir::LogicalResult test::TestDummyTensorOp::bufferize( + ::mlir::RewriterBase &rewriter, + const ::mlir::bufferization::BufferizationOptions &options) { + auto buffer = mlir::bufferization::getBuffer(rewriter, getInput(), options); + if (mlir::failed(buffer)) + return failure(); + + const auto outType = getOutput().getType(); + const auto bufferizedOutType = test::TestMemrefType::get( + getContext(), outType.getShape(), outType.getElementType(), nullptr); + // replace op with memref analogy + auto dummyMemrefOp = rewriter.create( + getLoc(), bufferizedOutType, *buffer); + + mlir::bufferization::replaceOpWithBufferizedValues(rewriter, getOperation(), + dummyMemrefOp.getResult()); + + return mlir::success(); +} + +::mlir::LogicalResult test::TestCreateTensorOp::bufferize( + ::mlir::RewriterBase &rewriter, + const ::mlir::bufferization::BufferizationOptions &options) { + // Note: mlir::bufferization::getBufferType() would internally call + // TestCreateTensorOp::getBufferType() + const auto bufferizedOutType = + mlir::bufferization::getBufferType(getOutput(), options); + if (mlir::failed(bufferizedOutType)) + return failure(); + + // replace op with memref analogy + auto createMemrefOp = + rewriter.create(getLoc(), *bufferizedOutType); + + mlir::bufferization::replaceOpWithBufferizedValues( + rewriter, getOperation(), createMemrefOp.getResult()); + + return mlir::success(); +} + +mlir::FailureOr +test::TestCreateTensorOp::getBufferType( + mlir::Value value, const mlir::bufferization::BufferizationOptions &, + 
llvm::SmallVector<::mlir::Value> &) { + const auto type = dyn_cast(value.getType()); + if (type == nullptr) + return failure(); + + return cast(test::TestMemrefType::get( + getContext(), type.getShape(), type.getElementType(), nullptr)); +} diff --git a/mlir/test/lib/Dialect/Test/TestOps.h b/mlir/test/lib/Dialect/Test/TestOps.h index f070c3bedd92..ea8867e3fc41 100644 --- a/mlir/test/lib/Dialect/Test/TestOps.h +++ b/mlir/test/lib/Dialect/Test/TestOps.h @@ -13,6 +13,7 @@ #include "TestInterfaces.h" #include "TestTypes.h" #include "mlir/Bytecode/BytecodeImplementation.h" +#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h" #include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/Dialect/DLTI/Traits.h" #include "mlir/Dialect/Func/IR/FuncOps.h" diff --git a/mlir/test/lib/Dialect/Test/TestOps.td b/mlir/test/lib/Dialect/Test/TestOps.td index f37573c1351c..369ba2381a4b 100644 --- a/mlir/test/lib/Dialect/Test/TestOps.td +++ b/mlir/test/lib/Dialect/Test/TestOps.td @@ -30,7 +30,7 @@ include "mlir/Interfaces/InferTypeOpInterface.td" include "mlir/Interfaces/LoopLikeInterface.td" include "mlir/Interfaces/MemorySlotInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td" - +include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td" // Include the attribute definitions. include "TestAttrDefs.td" @@ -3373,4 +3373,78 @@ def TestMultiSlotAlloca : TEST_Op<"multi_slot_alloca", let assemblyFormat = "attr-dict `:` functional-type(operands, results)"; } +//===----------------------------------------------------------------------===// +// Test Ops bufferization +//===----------------------------------------------------------------------===// + +def TestDummyTensorOp : TEST_Op<"dummy_tensor_op", + [DeclareOpInterfaceMethods]> { + let arguments = (ins + Arg:$input + ); + let results = (outs + Arg:$output + ); + + let extraClassDefinition = [{ + bool test::TestDummyTensorOp::bufferizesToMemoryRead(::mlir::OpOperand&, + const ::mlir::bufferization::AnalysisState&) { + return true; + } + bool test::TestDummyTensorOp::bufferizesToMemoryWrite(::mlir::OpOperand&, + const ::mlir::bufferization::AnalysisState&) { + return true; + } + ::mlir::bufferization::AliasingValueList + test::TestDummyTensorOp::getAliasingValues(::mlir::OpOperand&, + const ::mlir::bufferization::AnalysisState&) { + return {}; + } + }]; +} + +def TestDummyMemrefOp : TEST_Op<"dummy_memref_op", []> { + let arguments = (ins + Arg:$input + ); + let results = (outs + Arg:$output + ); +} + +def TestCreateTensorOp : TEST_Op<"create_tensor_op", + [DeclareOpInterfaceMethods]> { + let arguments = (ins); + let results = (outs Arg:$output); + let extraClassDefinition = [{ + bool test::TestCreateTensorOp::bufferizesToMemoryRead(::mlir::OpOperand&, + const ::mlir::bufferization::AnalysisState&) { + return true; + } + bool test::TestCreateTensorOp::bufferizesToMemoryWrite(::mlir::OpOperand&, + const ::mlir::bufferization::AnalysisState&) { + return true; + } + bool test::TestCreateTensorOp::bufferizesToAllocation(mlir::Value value) { + return false; + } + + ::mlir::bufferization::AliasingValueList + test::TestCreateTensorOp::getAliasingValues(::mlir::OpOperand&, + const ::mlir::bufferization::AnalysisState&) { + return {}; + } + }]; +} + +def TestCreateMemrefOp : TEST_Op<"create_memref_op"> { + let arguments = (ins); + let results = (outs Arg:$output); +} + #endif // TEST_OPS diff --git a/mlir/test/lib/Dialect/Test/TestTypeDefs.td b/mlir/test/lib/Dialect/Test/TestTypeDefs.td index 3b344bca58bf..de99c8abfa4d 100644 --- 
a/mlir/test/lib/Dialect/Test/TestTypeDefs.td +++ b/mlir/test/lib/Dialect/Test/TestTypeDefs.td @@ -19,6 +19,7 @@ include "TestAttrDefs.td" include "TestInterfaces.td" include "mlir/IR/BuiltinTypes.td" include "mlir/Interfaces/DataLayoutInterfaces.td" +include "mlir/Dialect/Bufferization/IR/BufferizationTypeInterfaces.td" // All of the types will extend this class. class Test_Type traits = []> @@ -408,4 +409,58 @@ def TestTypeNewlineAndIndent : Test_Type<"TestTypeNewlineAndIndent"> { let hasCustomAssemblyFormat = 1; } +def TestTensorType : Test_Type<"TestTensor", + [Bufferization_TensorLikeTypeInterface, ShapedTypeInterface]> { + let mnemonic = "test_tensor"; + let parameters = (ins + ArrayRefParameter<"int64_t">:$shape, + "mlir::Type":$elementType + ); + let assemblyFormat = "`<` `[` $shape `]` `,` $elementType `>`"; + + let extraClassDeclaration = [{ + // ShapedTypeInterface: + bool hasRank() const { + return true; + } + test::TestTensorType cloneWith(std::optional> shape, + mlir::Type elementType) const { + return test::TestTensorType::get( + getContext(), shape.value_or(getShape()), elementType); + } + + // TensorLikeTypeInterface: + ::mlir::FailureOr<::mlir::bufferization::BufferLikeType> + getBufferType(const ::mlir::bufferization::BufferizationOptions& options, + ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError); + + ::mlir::LogicalResult verifyCompatibleBufferType( + ::mlir::bufferization::BufferLikeType bufferType, + ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError); + }]; +} + +def TestMemrefType : Test_Type<"TestMemref", + [Bufferization_BufferLikeTypeInterface, ShapedTypeInterface]> { + let mnemonic = "test_memref"; + let parameters = (ins + ArrayRefParameter<"int64_t">:$shape, + "mlir::Type":$elementType, + DefaultValuedParameter<"mlir::Attribute", "nullptr">:$memSpace + ); + let assemblyFormat = "`<` `[` $shape `]` `,` $elementType (`,` $memSpace^)? 
`>`"; + + let extraClassDeclaration = [{ + // ShapedTypeInterface: + bool hasRank() const { + return true; + } + test::TestMemrefType cloneWith(std::optional> shape, + mlir::Type elementType) const { + return test::TestMemrefType::get( + getContext(), shape.value_or(getShape()), elementType, getMemSpace()); + } + }]; +} + #endif // TEST_TYPEDEFS diff --git a/mlir/test/lib/Dialect/Test/TestTypes.cpp b/mlir/test/lib/Dialect/Test/TestTypes.cpp index 77e5b35b68ba..40eb5481899a 100644 --- a/mlir/test/lib/Dialect/Test/TestTypes.cpp +++ b/mlir/test/lib/Dialect/Test/TestTypes.cpp @@ -564,3 +564,23 @@ void TestTypeNewlineAndIndentType::print(::mlir::AsmPrinter &printer) const { printer.printNewline(); printer << ">"; } + +::mlir::FailureOr<::mlir::bufferization::BufferLikeType> +TestTensorType::getBufferType( + const ::mlir::bufferization::BufferizationOptions &, + ::llvm::function_ref<::mlir::InFlightDiagnostic()>) { + return llvm::cast( + TestMemrefType::get(getContext(), getShape(), getElementType(), nullptr)); +} + +::mlir::LogicalResult TestTensorType::verifyCompatibleBufferType( + ::mlir::bufferization::BufferLikeType bufferType, + ::llvm::function_ref<::mlir::InFlightDiagnostic()> emitError) { + auto testMemref = llvm::dyn_cast(bufferType); + if (!testMemref) + return emitError() << "expected TestMemrefType"; + + const bool valid = getShape() == testMemref.getShape() && + getElementType() == testMemref.getElementType(); + return mlir::success(valid); +} diff --git a/mlir/test/lib/Dialect/Test/TestTypes.h b/mlir/test/lib/Dialect/Test/TestTypes.h index cef3f056a798..6499a96f495d 100644 --- a/mlir/test/lib/Dialect/Test/TestTypes.h +++ b/mlir/test/lib/Dialect/Test/TestTypes.h @@ -18,6 +18,7 @@ #include #include "TestTraits.h" +#include "mlir/Dialect/Bufferization/IR/BufferizationTypeInterfaces.h" #include "mlir/IR/Diagnostics.h" #include "mlir/IR/Dialect.h" #include "mlir/IR/DialectImplementation.h" diff --git a/mlir/test/lit.site.cfg.py.in b/mlir/test/lit.site.cfg.py.in index 3cd3bde9f216..132aabe13594 100644 --- a/mlir/test/lit.site.cfg.py.in +++ b/mlir/test/lit.site.cfg.py.in @@ -69,14 +69,6 @@ config.riscv_vector_emulator_options = "@RISCV_VECTOR_EMULATOR_OPTIONS@" config.riscv_emulator_lli_executable = "@RISCV_EMULATOR_LLI_EXECUTABLE@" config.riscv_emulator_utils_lib_dir = "@RISCV_EMULATOR_UTILS_LIB_DIR@" -# Add feature to enable/disable XeGPU Dialect tests -if "@MLIR_DIALECT_XEGPU_ENABLE@" == "ON": - config.available_features.add("xegpu-dialect-enabled") - -# Add feature to enable/disable TosaTotensor tests -if "@MLIR_CONVERSION_TOSATOTENSOR_ENABLE@" == "ON": - config.available_features.add("tosa-to-tensor-enabled") - import lit.llvm lit.llvm.initialize(lit_config, config) diff --git a/mlir/tools/mlir-opt/mlir-opt.cpp b/mlir/tools/mlir-opt/mlir-opt.cpp index 960f7037a1b6..4379cdfc2a86 100644 --- a/mlir/tools/mlir-opt/mlir-opt.cpp +++ b/mlir/tools/mlir-opt/mlir-opt.cpp @@ -147,6 +147,7 @@ void registerTestSPIRVCPURunnerPipeline(); void registerTestSPIRVFuncSignatureConversion(); void registerTestSPIRVVectorUnrolling(); void registerTestTensorCopyInsertionPass(); +void registerTestTensorLikeAndBufferLikePass(); void registerTestTensorTransforms(); void registerTestTopologicalSortAnalysisPass(); void registerTestTransformDialectEraseSchedulePass(); @@ -286,6 +287,7 @@ void registerTestPasses() { mlir::test::registerTestSPIRVFuncSignatureConversion(); mlir::test::registerTestSPIRVVectorUnrolling(); mlir::test::registerTestTensorCopyInsertionPass(); + 
mlir::test::registerTestTensorLikeAndBufferLikePass(); mlir::test::registerTestTensorTransforms(); mlir::test::registerTestTopologicalSortAnalysisPass(); mlir::test::registerTestTransformDialectEraseSchedulePass(); diff --git a/mlir/utils/tree-sitter-mlir/dialect/bufferization.js b/mlir/utils/tree-sitter-mlir/dialect/bufferization.js index 8d9fdb1fcfc3..ec878f3dc5a0 100644 --- a/mlir/utils/tree-sitter-mlir/dialect/bufferization.js +++ b/mlir/utils/tree-sitter-mlir/dialect/bufferization.js @@ -12,9 +12,9 @@ module.exports = { field('attributes', optional($.attribute)), field('return', $._type_annotation)), - // operation ::= `bufferization.to_memref` $tensor + // operation ::= `bufferization.to_buffer` $tensor // attr-dict `:` type($memref) - seq('bufferization.to_memref', + seq('bufferization.to_buffer', field('tensor', $.value_use), field('attributes', optional($.attribute)), field('return', $._type_annotation)), diff --git a/mlir/utils/tree-sitter-mlir/queries/highlights.scm b/mlir/utils/tree-sitter-mlir/queries/highlights.scm index 97aba2b266ec..4cbea7bbca03 100644 --- a/mlir/utils/tree-sitter-mlir/queries/highlights.scm +++ b/mlir/utils/tree-sitter-mlir/queries/highlights.scm @@ -209,7 +209,7 @@ "tensor.yield" "bufferization.alloc_tensor" - "bufferization.to_memref" + "bufferization.to_buffer" "bufferization.to_tensor" "linalg.batch_matmul"