Forking dynamic behavior from flow.tensor.constant. (#17034)
Needed this sooner than I expected: constant materialization requires the
ConstantLike trait on the constant op in order to work. The special
dynamic-shape behavior is now moved to the flow.tensor.dynamic_constant op.

Follow-up to #17024.
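
As a rough sketch of the IR-level effect (values illustrative, taken from the op
documentation updated below): the shape-erasing form of flow.tensor.constant
becomes a dedicated op, while flow.tensor.constant itself is now ConstantLike
with matching value/result types.

```mlir
// Previously a single op covered both cases and could erase shape information:
//   %cst = flow.tensor.constant dense<4.0> : tensor<2x2xf32> -> tensor<?x2xf32>
// Now the dynamic behavior lives on its own op:
%dyn = flow.tensor.dynamic_constant dense<4.0> : tensor<2x2xf32> -> tensor<?x2xf32>
// and flow.tensor.constant keeps the static, ConstantLike form:
%cst = flow.tensor.constant dense<4.0> : tensor<2x2xf32>
```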
benvanik committed Apr 12, 2024
1 parent 954cb36 commit 55fafcf
Showing 28 changed files with 281 additions and 130 deletions.
30 changes: 30 additions & 0 deletions compiler/src/iree/compiler/Dialect/Flow/IR/FlowBase.td
@@ -11,6 +11,7 @@ include "iree/compiler/Dialect/Flow/IR/FlowInterfaces.td"
include "iree/compiler/Dialect/Util/IR/UtilBase.td"
include "iree/compiler/Dialect/Util/IR/UtilTypes.td"
include "mlir/IR/AttrTypeBase.td"
include "mlir/IR/BuiltinAttributeInterfaces.td"

//===----------------------------------------------------------------------===//
// IREE execution flow dialect
@@ -233,4 +234,33 @@ def FLOW_CollectiveReductionOpAttr :
let cppNamespace = "mlir::iree_compiler::IREE::Flow";
}

//===----------------------------------------------------------------------===//
// Parameter storage attributes
//===----------------------------------------------------------------------===//

def FLOW_NamedParameterAttr :
AttrDef<Flow_Dialect, "NamedParameter", [
TypedAttrInterface,
]> {
let mnemonic = "parameter.named";
let summary = [{named parameter referenced by an optional scope and key}];
let description = [{
Specifies an externally-defined parameter that can be referenced by an
optional scope defining a set of parameters and a key uniquely identifying
the parameter within its scope.
}];
let parameters = (ins
AttributeSelfTypeParameter<"">:$type,
OptionalParameter<"StringAttr">:$scope,
AttrParameter<"StringAttr", "">:$key,
OptionalParameter<"DictionaryAttr">:$config
);
let assemblyFormat = [{
`<`
custom<ParameterReference>($scope, $key)
(`,` $config^)?
`>`
}];
}

#endif // IREE_DIALECT_FLOW_BASE
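
A minimal sketch of how the new attribute might appear in IR, assuming
illustrative scope/key names (the custom<ParameterReference> syntax it relies
on is defined in FlowTypes.cpp further down):

```mlir
// Key-only reference; the optional scope is omitted.
#flow.parameter.named<"weight0"> : tensor<2x2xf32>
// Scoped reference; scope and key are separated by `::`.
#flow.parameter.named<"model"::"weight0"> : tensor<2x2xf32>
```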
6 changes: 5 additions & 1 deletion compiler/src/iree/compiler/Dialect/Flow/IR/FlowDialect.cpp
@@ -74,8 +74,12 @@ FlowDialect::FlowDialect(MLIRContext *context)

Operation *FlowDialect::materializeConstant(OpBuilder &builder, Attribute value,
Type type, Location loc) {
if (arith::ConstantOp::isBuildableWith(value, type))
if (arith::ConstantOp::isBuildableWith(value, type)) {
return builder.create<arith::ConstantOp>(loc, type, cast<TypedAttr>(value));
} else if (IREE::Flow::TensorConstantOp::isBuildableWith(value, type)) {
return builder.create<IREE::Flow::TensorConstantOp>(loc, type,
cast<TypedAttr>(value));
}
return nullptr;
}
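
A hedged sketch of what the two materialization paths yield when folding needs
to rebuild a constant (the parameter attribute below is illustrative):

```mlir
// Builtin attributes still materialize as arith.constant:
%cst = arith.constant dense<1.0> : tensor<4xf32>
// Tensor-typed IREE attributes now materialize as flow.tensor.constant,
// which is why the op needed the ConstantLike trait:
%param = flow.tensor.constant #flow.parameter.named<"model"::"weight0"> : tensor<4xf32>
```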

65 changes: 44 additions & 21 deletions compiler/src/iree/compiler/Dialect/Flow/IR/FlowOpFolders.cpp
Expand Up @@ -779,14 +779,57 @@ static bool compareShapesEqual(ShapedType lhsType, ValueRange lhsDynamicDims,
// flow.tensor.constant
//===----------------------------------------------------------------------===//

OpFoldResult TensorConstantOp::fold(FoldAdaptor operands) {
OpFoldResult TensorConstantOp::fold(FoldAdaptor operands) { return getValue(); }

//===----------------------------------------------------------------------===//
// flow.tensor.dynamic_constant
//===----------------------------------------------------------------------===//

OpFoldResult TensorDynamicConstantOp::fold(FoldAdaptor operands) {
auto dynamicType = getType();
if (dynamicType.getNumDynamicDims() == 0) {
return getValue();
}
return {};
}

namespace {

struct ExpandDynamicShapeConstant
: public OpRewritePattern<TensorDynamicConstantOp> {
using OpRewritePattern<TensorDynamicConstantOp>::OpRewritePattern;
LogicalResult matchAndRewrite(TensorDynamicConstantOp op,
PatternRewriter &rewriter) const override {
auto constantOp = rewriter.create<IREE::Flow::TensorConstantOp>(
op.getLoc(), op.getValue());
auto dynamicType = op.getType();
auto staticType = cast<ShapedType>(op.getValue().getType());
SmallVector<Value> dynamicDims;
for (int64_t i = 0; i < dynamicType.getRank(); ++i) {
if (dynamicType.isDynamicDim(i)) {
auto dimValue = rewriter
.create<arith::ConstantIndexOp>(
op.getLoc(), staticType.getDimSize(i))
.getResult();
dynamicDims.push_back(rewriter
.create<IREE::Util::OptimizationBarrierOp>(
op.getLoc(), dimValue)
.getResult(0));
}
}
rewriter.replaceOpWithNewOp<IREE::Flow::TensorReshapeOp>(
op, dynamicType, constantOp.getResult(), dynamicDims);
return success();
}
};

} // namespace

void TensorDynamicConstantOp::getCanonicalizationPatterns(
RewritePatternSet &results, MLIRContext *context) {
results.insert<ExpandDynamicShapeConstant>(context);
}

//===----------------------------------------------------------------------===//
// flow.tensor.tie_shape
//===----------------------------------------------------------------------===//
@@ -973,26 +1016,6 @@ struct ResolveShapedDim : public OpRewritePattern<tensor::DimOp> {
return success();
}

// Special handling of flow.tensor.constant which may be acting as a
// dynamically shaped value that we want to remove the tensor.dim of but
// still treat the shape as dynamic. We do this by inserting an optimization
// barrier between the constant and the consumers. Note that this use case
// is very specific and generally only applicable to tests/benchmarks.
if (auto constantOp = dyn_cast_if_present<IREE::Flow::TensorConstantOp>(
op.getShapedValue().getDefiningOp())) {
auto valueType = dyn_cast<ShapedType>(constantOp.getValue().getType());
if (valueType && valueType != constantOp.getType()) {
// Constant op is acting as a cast. If the dimension being queried was
// static it would have been resolved above so we know it's dynamic
// here.
Value staticValue = rewriter.create<arith::ConstantIndexOp>(
op.getLoc(), valueType.getDimSize(idx));
rewriter.replaceOpWithNewOp<IREE::Util::OptimizationBarrierOp>(
op, staticValue);
return success();
}
}

return rewriter.notifyMatchFailure(op, "no dynamic dims found/usable");
}
};
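
The ExpandDynamicShapeConstant pattern above takes over the special handling
removed from ResolveShapedDim; a before/after sketch matching the updated
tensor_folding.mlir test:

```mlir
// Before canonicalization:
%0 = flow.tensor.dynamic_constant dense<2> : tensor<2x4xi32> -> tensor<2x?xi32>

// After: a static constant, an optimization barrier per dynamic dimension,
// and a reshape that reattaches the dynamic shape.
%cst = flow.tensor.constant dense<2> : tensor<2x4xi32>
%c4 = arith.constant 4 : index
%d1 = util.optimization_barrier %c4 : index
%1 = flow.tensor.reshape %cst : tensor<2x4xi32> -> tensor<2x?xi32>{%d1}
```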
37 changes: 10 additions & 27 deletions compiler/src/iree/compiler/Dialect/Flow/IR/FlowOps.cpp
@@ -1597,35 +1597,18 @@ LogicalResult CallOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
// flow.tensor.constant
//===----------------------------------------------------------------------===//

ParseResult TensorConstantOp::parse(OpAsmParser &parser,
OperationState &result) {
if (parser.parseOptionalAttrDict(result.attributes))
return failure();
TypedAttr valueAttr;
if (failed(parser.parseAttribute(valueAttr)))
return failure();
result.addAttribute("value", valueAttr);
if (succeeded(parser.parseOptionalArrow())) {
Type resultType;
if (failed(parser.parseType(resultType)))
return failure();
result.addTypes(resultType);
} else {
result.addTypes(valueAttr.getType());
}
return success();
// static
bool TensorConstantOp::isBuildableWith(Attribute value, Type type) {
return isa<RankedTensorType>(type);
}

void TensorConstantOp::print(OpAsmPrinter &p) {
p << " ";
p.printOptionalAttrDict((*this)->getAttrs(), {"value"});
p.printAttribute(getValue());
auto attrType = getValue().getType();
auto resultType = getType();
if (attrType != resultType) {
p << " -> ";
p.printType(resultType);
}
//===----------------------------------------------------------------------===//
// flow.tensor.dynamic_constant
//===----------------------------------------------------------------------===//

// static
bool TensorDynamicConstantOp::isBuildableWith(Attribute value, Type type) {
return TensorConstantOp::isBuildableWith(value, type);
}

//===----------------------------------------------------------------------===//
44 changes: 40 additions & 4 deletions compiler/src/iree/compiler/Dialect/Flow/IR/FlowOps.td
@@ -1040,26 +1040,62 @@ def OpGroupTensorOps : OpDocGroup {

let opDocGroup = OpGroupTensorOps in {

def FLOW_TensorConstantOp : FLOW_PureOp<"tensor.constant"> {
def FLOW_TensorConstantOp : FLOW_PureOp<"tensor.constant", [
ConstantLike,
AllTypesMatch<["value", "result"]>,
]> {
let summary = [{tensor constant that can have dynamic dimensions}];
let description = [{
Allows specifying a tensor constant of IREE-specific types/attributes or
where the return value can erase shape information.
Allows specifying a tensor constant of IREE-specific types/attributes.

```mlir
%cst = flow.tensor.constant #something_tensor_like : tensor<2x2xf32>
%res = math.absf %cst : tensor<2x2xf32>
```
}];
let arguments = (ins TypedAttrInterface:$value);
let results = (outs AnyTensor:$result);

let assemblyFormat = [{
attr-dict $value
}];

let extraClassDeclaration = [{
// Returns true if the constant op can be built with the given attribute.
static bool isBuildableWith(Attribute value, Type type);
}];

let hasFolder = 1;
}

def FLOW_TensorDynamicConstantOp : FLOW_Op<"tensor.dynamic_constant"> {
let summary = [{tensor constant that can have dynamic dimensions}];
let description = [{
Allows specifying a tensor constant of IREE-specific types/attributes with
a dynamic shape that approximates a value as it would be passed by the user.
This disables many optimizations and should only be used for testing or
benchmarking where dynamic dimension behavior must be preserved.

```mlir
%cst = flow.tensor.constant dense<4.0> : tensor<2x2xf32> -> tensor<?x2xf32>
%cst = flow.tensor.dynamic_constant #something_tensor_like : tensor<2x2xf32> -> tensor<?x2xf32>
%res = math.absf %cst : tensor<?x2xf32>
```
}];
let arguments = (ins TypedAttrInterface:$value);
let results = (outs AnyTensor:$result);

let assemblyFormat = [{
attr-dict $value `->` type($result)
}];

let extraClassDeclaration = [{
// Returns true if the constant op can be built with the given attribute.
static bool isBuildableWith(Attribute value, Type type);
}];

let hasFolder = 1;
let hasCanonicalizer = 1;
}

def FLOW_TensorTieShapeOp : FLOW_PureOp<"tensor.tie_shape", [
34 changes: 34 additions & 0 deletions compiler/src/iree/compiler/Dialect/Flow/IR/FlowTypes.cpp
@@ -276,4 +276,38 @@ getCollectiveElementTypeAttr(RankedTensorType type) {
*collectiveElemType);
}

//===----------------------------------------------------------------------===//
// custom<ParameterReference>($scope, $key)
//===----------------------------------------------------------------------===//

ParseResult parseParameterReference(AsmParser &parser, StringAttr &scopeAttr,
StringAttr &keyAttr) {
auto builder = parser.getBuilder();
StringAttr firstAttr;
if (failed(parser.parseCustomAttributeWithFallback(firstAttr,
builder.getNoneType()))) {
return failure();
}
if (failed(parser.parseOptionalColon())) {
keyAttr = firstAttr;
return success();
}
scopeAttr = firstAttr;
if (failed(parser.parseColon()) ||
failed(parser.parseCustomAttributeWithFallback(keyAttr,
builder.getNoneType()))) {
return failure();
}
return success();
}

void printParameterReference(AsmPrinter &p, StringAttr scopeAttr,
StringAttr keyAttr) {
if (scopeAttr) {
p << "\"" << scopeAttr.getValue() << "\"";
p << "::";
}
p << "\"" << keyAttr.getValue() << "\"";
}

} // namespace mlir::iree_compiler::IREE::Flow
14 changes: 14 additions & 0 deletions compiler/src/iree/compiler/Dialect/Flow/IR/FlowTypes.h
@@ -183,6 +183,20 @@ getCollectiveElementTypeAttr(RankedTensorType type);
std::optional<IREE::Flow::CollectiveElementType>
convertToFlowCollectiveElementType(Type type);

//===----------------------------------------------------------------------===//
// custom<ParameterReference>($scope, $key)
//===----------------------------------------------------------------------===//

ParseResult parseParameterReference(AsmParser &parser, StringAttr &scopeAttr,
StringAttr &keyAttr);
void printParameterReference(AsmPrinter &p, StringAttr scopeAttr,
StringAttr keyAttr);
static inline void printParameterReference(AsmPrinter &p, Operation *op,
StringAttr scopeAttr,
StringAttr keyAttr) {
printParameterReference(p, scopeAttr, keyAttr);
}

} // namespace mlir::iree_compiler::IREE::Flow

#endif // IREE_COMPILER_DIALECT_FLOW_IR_FLOWTYPES_H_
21 changes: 11 additions & 10 deletions compiler/src/iree/compiler/Dialect/Flow/IR/test/tensor_folding.mlir
@@ -4,8 +4,8 @@
util.func public @expandStaticShapeConstant() -> (tensor<2x4xi32>, index, index) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
// CHECK-DAG: %[[CST:.+]] = arith.constant dense<2> : tensor<2x4xi32>
%0 = flow.tensor.constant dense<2> : tensor<2x4xi32> -> tensor<2x4xi32>
// CHECK-DAG: %[[CST:.+]] = flow.tensor.constant dense<2> : tensor<2x4xi32>
%0 = flow.tensor.constant dense<2> : tensor<2x4xi32>
// CHECK-DAG: %[[C2:.+]] = arith.constant 2 : index
%d0 = tensor.dim %0, %c0 : tensor<2x4xi32>
// CHECK-DAG: %[[C4:.+]] = arith.constant 4 : index
@@ -16,19 +16,20 @@ util.func public @expandStaticShapeConstant() -> (tensor<2x4xi32>, index, index)

// -----

// CHECK-LABEL: @tensorDimOfDynamicConstant
util.func public @tensorDimOfDynamicConstant() -> (index, index) {
// CHECK-LABEL: @expandDynamicShapeConstant
util.func public @expandDynamicShapeConstant() -> (tensor<2x?xi32>, index, index) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
// CHECK-NOT: flow.tensor.constant
%0 = flow.tensor.constant dense<2> : tensor<2x4xi32> -> tensor<2x?xi32>
// CHECK-DAG: %[[CST:.+]] = flow.tensor.constant dense<2> : tensor<2x4xi32>
%0 = flow.tensor.dynamic_constant dense<2> : tensor<2x4xi32> -> tensor<2x?xi32>
// CHECK-DAG: %[[C2:.+]] = arith.constant 2 : index
%d0 = tensor.dim %0, %c0 : tensor<2x?xi32>
// CHECK-DAG: %[[C4:.+]] = arith.constant 4 : index
// CHECK-DAG: %[[C4_DYNAMIC:.+]] = util.optimization_barrier %[[C4]]
// CHECK-DAG: %[[D1:.+]] = util.optimization_barrier %[[C4]] : index
// CHECK: %[[T:.+]] = flow.tensor.reshape %[[CST]] : tensor<2x4xi32> -> tensor<2x?xi32>{%[[D1]]}
%d0 = tensor.dim %0, %c0 : tensor<2x?xi32>
%d1 = tensor.dim %0, %c1 : tensor<2x?xi32>
// CHECK: util.return %[[C2]], %[[C4_DYNAMIC]]
util.return %d0, %d1 : index, index
// CHECK: util.return %[[T]], %[[C2]], %[[D1]]
util.return %0, %d0, %d1 : tensor<2x?xi32>, index, index
}

// -----
@@ -794,8 +794,8 @@ util.func public @dynamic_dot() -> tensor<?x?xf32> {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%cst = arith.constant 0.000000e+00 : f32
%0 = flow.tensor.constant dense<[[1.500000e+01, 1.400000e+01, 1.300000e+01], [1.200000e+01, 1.100000e+01, 1.000000e+01], [9.000000e+00, 8.000000e+00, 7.000000e+00], [6.000000e+00, 5.000000e+00, 4.000000e+00], [3.000000e+00, 2.000000e+00, 1.000000e+00]]> : tensor<5x3xf32> -> tensor<?x?xf32>
%1 = flow.tensor.constant dense<[[1.500000e+01, 1.400000e+01, 1.300000e+01, 1.200000e+01, 1.100000e+01], [1.000000e+01, 9.000000e+00, 8.000000e+00, 7.000000e+00, 6.000000e+00], [5.000000e+00, 4.000000e+00, 3.000000e+00, 2.000000e+00, 1.000000e+00]]> : tensor<3x5xf32> -> tensor<?x?xf32>
%0 = flow.tensor.dynamic_constant dense<[[1.500000e+01, 1.400000e+01, 1.300000e+01], [1.200000e+01, 1.100000e+01, 1.000000e+01], [9.000000e+00, 8.000000e+00, 7.000000e+00], [6.000000e+00, 5.000000e+00, 4.000000e+00], [3.000000e+00, 2.000000e+00, 1.000000e+00]]> : tensor<5x3xf32> -> tensor<?x?xf32>
%1 = flow.tensor.dynamic_constant dense<[[1.500000e+01, 1.400000e+01, 1.300000e+01, 1.200000e+01, 1.100000e+01], [1.000000e+01, 9.000000e+00, 8.000000e+00, 7.000000e+00, 6.000000e+00], [5.000000e+00, 4.000000e+00, 3.000000e+00, 2.000000e+00, 1.000000e+00]]> : tensor<3x5xf32> -> tensor<?x?xf32>
%2 = tensor.dim %0, %c0 : tensor<?x?xf32>
%3 = tensor.dim %1, %c1 : tensor<?x?xf32>
%4 = tensor.empty(%2, %3) : tensor<?x?xf32>
