Skip to content

Commit

Permalink
[mlir][Linalg] Refactor Linalg op initTensors support - NFC
Browse files Browse the repository at this point in the history
Manually-defined named ops do not currently support `init_tensors` or return values and may never support them. Add extra interface to the StructuredOpInterface so that we can still write op-agnostic transformations based on StructuredOpInterface.

This is an NFC extension in preparation for tiling on tensors.

Differential Revision: https://reviews.llvm.org/D88481
  • Loading branch information
Nicolas Vasilache committed Sep 29, 2020
1 parent 113114a commit 6b64957
Show file tree
Hide file tree
Showing 5 changed files with 101 additions and 19 deletions.
33 changes: 26 additions & 7 deletions mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
Expand Up @@ -22,14 +22,19 @@ include "mlir/Interfaces/CopyOpInterface.td"
// The Linalg `NInputs` trait provides the API for ops that are known
// to have a specified number of inputs, all passed as operands.
// See Linalg/LinalgTraits.h for implementation details and usage.
class NInputs<int args_in> :
NativeOpTrait<"linalg::NInputs<" # !cast<string>(args_in) # ">::Impl"> {}
class NInputs<int n> :
NativeOpTrait<"linalg::NInputs<" # !cast<string>(n) # ">::Impl"> {}

// The Linalg `ZeroInitTensors` trait provides the API for ops that are known
// to not have init tensor operands (i.e. `getNumInitTensors()` is always 0).
// See Linalg/LinalgTraits.h for implementation details and usage.
def ZeroInitTensors : NativeOpTrait<"linalg::ZeroInitTensors"> {}

// The Linalg `NOutputs` trait provides the API for ops that are known
// to have a specified number of outputs, all passed as operands.
// See Linalg/LinalgTraits.h for implementation details and usage.
class NOutputs<int args_out> :
NativeOpTrait<"linalg::NOutputs<" # !cast<string>(args_out) # ">::Impl"> {}
class NOutputs<int n> :
NativeOpTrait<"linalg::NOutputs<" # !cast<string>(n) # ">::Impl"> {}

def StructuredOpTraits : NativeOpTrait<"linalg::StructuredOpTraits">;
def NamedStructuredOpTrait : NativeOpTrait<"linalg::NamedStructuredOpTrait">;
Expand Down Expand Up @@ -62,6 +67,7 @@ class LinalgStructured_Op<string mnemonic, list<OpTrait> props>
def CopyOp : LinalgStructured_Op<"copy", [
CopyOpInterface,
NInputs<1>,
ZeroInitTensors,
NOutputs<1>
]> {
let description = [{
Expand Down Expand Up @@ -159,7 +165,10 @@ def CopyOp : LinalgStructured_Op<"copy", [
let hasCanonicalizer = 1;
}

def FillOp : LinalgStructured_Op<"fill", [NInputs<0>, NOutputs<1>]> {
def FillOp : LinalgStructured_Op<"fill", [
NInputs<0>,
ZeroInitTensors,
NOutputs<1>]> {

let arguments = (ins AnyStridedMemRef:$output,
AnyTypeOf<[AnyFloat, AnySignlessInteger, AnyVector]>:$value);
Expand Down Expand Up @@ -254,7 +263,12 @@ class PoolingBase_Op<string mnemonic, list<OpTrait> props>
}];
}

def ConvOp : PoolingBase_Op<"conv", [NInputs<2>, NOutputs<1>]> {
def ConvOp : PoolingBase_Op<"conv", [
NInputs<2>,
// Despite having reductions, this manually defined ConvOp may only take
// memref operands and can never have init tensors.
ZeroInitTensors,
NOutputs<1>]> {

let description = [{
Generic n-D convolution as described in the TF documentation:
Expand Down Expand Up @@ -371,7 +385,12 @@ def ConvOp : PoolingBase_Op<"conv", [NInputs<2>, NOutputs<1>]> {
}

class SingleInputPoolingBase_Op<string mnemonic>
: PoolingBase_Op<mnemonic, [NInputs<2>, NOutputs<1>]> {
: PoolingBase_Op<mnemonic, [
NInputs<2>,
// Despite having reductions, this manually defined pooling op may only
// take memref operands and can never have init tensors.
ZeroInitTensors,
NOutputs<1>]> {
let description = [{
A base class for single input pooling function.

Expand Down
69 changes: 59 additions & 10 deletions mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOpsInterface.td
Expand Up @@ -125,13 +125,12 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
getNumIterators(getReductionIteratorTypeName(), iters) == 1;
}]>,
//===------------------------------------------------------------------===//
// Num input/output arguments handling.
// Num input/output/initTensors arguments handling.
//===------------------------------------------------------------------===//
// These special methods must be defined by each op that wants to implement
// the LinalgStructuredInterface. For now, this is either:
// - inherited statically by using the NInputs<unsigned> or
// NOutputs<unsigned> traits.
// - derived from args_in/args_out attributes (for linalg.generic and
// - Explicitly specified in the op definition.
// - Derived from variadic attributes (for "named" ops, linalg.generic and
// linalg.indexed_generic ops).
InterfaceMethod<
/*desc=*/[{
Expand All @@ -140,6 +139,13 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
/*retTy=*/"unsigned",
/*methodName=*/"getNumInputs"
>,
InterfaceMethod<
/*desc=*/[{
Return the number of init tensors.
}],
/*retTy=*/"unsigned",
/*methodName=*/"getNumInitTensors"
>,
InterfaceMethod<
/*desc=*/[{
Return the number of outputs.
Expand Down Expand Up @@ -371,6 +377,46 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
return {range.begin(), range.begin() + getNumInputsAndOutputBuffers()};
}]
>,
InterfaceMethod<
/*desc=*/[{
Return the range over init tensors.
}],
/*retTy=*/"Operation::operand_range",
/*methodName=*/"getInitTensors",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
auto range = this->getOperation()->getOperands();
return {range.begin() + getNumInputsAndOutputBuffers(),
range.begin() + getNumInputsAndOutputs()};
}]
>,
InterfaceMethod<
/*desc=*/[{
Return one single init tensor at position `$i`.
}],
/*retTy=*/"Value",
/*methodName=*/"getInitTensor",
/*args=*/(ins "unsigned":$i),
/*methodBody=*/"",
/*defaultImplementation=*/[{
assert(i < $_op.getNumInitTensors() && "overflowing init tensor index");
return getInitTensors()[i];
}]
>,
InterfaceMethod<
/*desc=*/[{
Return the range over inputs, output buffers and init tensors.
}],
/*retTy=*/"Operation::operand_range",
/*methodName=*/"getShapedOperands",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
auto range = this->getOperation()->getOperands();
return {range.begin(), range.begin() + getNumInputsAndOutputs()};
}]
>,
InterfaceMethod<
/*desc=*/[{
Return the `i`-th shaped type, there are 3 cases:
Expand Down Expand Up @@ -445,7 +491,8 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
return llvm::to_vector<4>($_op.indexing_maps().template getAsValueRange<AffineMapAttr>());
return llvm::to_vector<4>(
$_op.indexing_maps().template getAsValueRange<AffineMapAttr>());
}]
>,
InterfaceMethod<
Expand Down Expand Up @@ -528,11 +575,11 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
}],
/*retTy=*/"Operation *",
/*methodName=*/"create",
(ins "OpBuilder &":$builder, "Location":$loc,
(ins "OpBuilder &":$builder, "Location":$loc, "TypeRange":$resultTypes,
"ValueRange":$operands,
"ArrayRef<NamedAttribute>":$attributes), [{
return builder.create<ConcreteOp>(loc, TypeRange{}, operands,
attributes);
return builder.create<ConcreteOp>(
loc, resultTypes, operands, attributes);
}]
>,
InterfaceMethod<
Expand All @@ -542,10 +589,12 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
}],
/*retTy=*/"Operation *",
/*methodName=*/"clone",
(ins "OpBuilder &":$b, "Location":$loc, "ValueRange":$operands), [{
(ins "OpBuilder &":$b, "Location":$loc, "TypeRange":$resultTypes,
"ValueRange":$operands),
[{
BlockAndValueMapping map;
unsigned numRegions = $_op.getOperation()->getNumRegions();
Operation *res = create(b, loc, operands, $_op.getAttrs());
Operation *res = create(b, loc, resultTypes, operands, $_op.getAttrs());
assert(res->getNumRegions() == numRegions && "inconsistent # regions");
for (unsigned ridx = 0; ridx < numRegions; ++ridx)
$_op.getOperation()->getRegion(ridx).cloneInto(
Expand Down
14 changes: 14 additions & 0 deletions mlir/include/mlir/Dialect/Linalg/IR/LinalgTraits.h
Expand Up @@ -35,6 +35,17 @@ template <unsigned N> class NInputs {
};
};

/// This class provides the API for ops that are known to not have init tensor
/// operands: `getNumInitTensors()` is statically 0. Use as a trait as follows:
///
///   class CopyOp : public Op<CopyOp, OpTrait::ZeroInitTensors> {
///
template <typename ConcreteType>
class ZeroInitTensors : public TraitBase<ConcreteType, ZeroInitTensors> {
public:
  // Implements the `getNumInitTensors` interface hook for ops that can never
  // carry init tensors.
  static unsigned getNumInitTensors() { return 0; }
};

/// This class provides the API for ops that are known to have a specified
/// number of outputs, all passed as operands. Use as a trait as follows:
///
Expand Down Expand Up @@ -87,6 +98,9 @@ class NamedStructuredOpTrait
unsigned getNumInputs() {
return cast<ConcreteType>(this->getOperation()).inputs().size();
}
unsigned getNumInitTensors() {
return cast<ConcreteType>(this->getOperation()).init_tensors().size();
}
unsigned getNumOutputs() {
ConcreteType concreteOp = cast<ConcreteType>(this->getOperation());
return concreteOp.output_buffers().size() +
Expand Down
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
Expand Up @@ -99,7 +99,7 @@ static LinalgOp cloneWithLoopRanges(OpBuilder &b, Location loc, LinalgOp op,
auto operands = getAssumedNonViewOperands(op);
clonedViews.append(operands.begin(), operands.end());

Operation *clonedOp = op.clone(b, loc, clonedViews);
Operation *clonedOp = op.clone(b, loc, /*resultTypes*/ {}, clonedViews);
// When the producer is an IndexedGenercOp, we have to transform its block
// IV arguments according to the tiling of the consumer, i.e. offset them by
// the values computed in `loopRanges`.
Expand Down
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
Expand Up @@ -405,7 +405,7 @@ Optional<TiledLinalgOp> static tileLinalgOpImpl(
tileSizes, allViewSizes);
auto operands = getAssumedNonViewOperands(op);
views.append(operands.begin(), operands.end());
res = op.clone(b, loc, views);
res = op.clone(b, loc, /*resultTypes*/ {}, views);
return scf::ValueVector{};
},
options.distribution);
Expand Down

0 comments on commit 6b64957

Please sign in to comment.