Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Adding optional ops in contrib ops #7946

Merged
merged 26 commits into from
Jun 24, 2021
Merged
Show file tree
Hide file tree
Changes from 6 commits
Commits
Show all changes
26 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Empty file added =
Empty file.
2 changes: 1 addition & 1 deletion cgmanifests/submodules/cgmanifest.json
Original file line number Diff line number Diff line change
Expand Up @@ -305,7 +305,7 @@
"component": {
"type": "git",
"git": {
"commitHash": "adeb09b3af63939c507355f1ef6bca9c32e7e244",
"commitHash": "04971f7d26aad7bbc71e2d92433631f985cb96c6",
"repositoryUrl": "https://github.com/onnx/onnx"
},
"comments": "git submodule at cmake/external/onnx"
Expand Down
2 changes: 1 addition & 1 deletion cmake/external/onnx
Submodule onnx updated 188 files
1 change: 1 addition & 0 deletions include/onnxruntime/core/graph/graph.h
Original file line number Diff line number Diff line change
Expand Up @@ -325,6 +325,7 @@ class Node {
ADD_ATTR_INTERFACES(ONNX_NAMESPACE::TensorProto)
ADD_ATTR_INTERFACES(ONNX_NAMESPACE::GraphProto)
ADD_ATTR_INTERFACES(ONNX_NAMESPACE::SparseTensorProto)
ADD_ATTR_INTERFACES(ONNX_NAMESPACE::TypeProto)

/** Gets the Node's attributes. */
const NodeAttributes& GetAttributes() const noexcept { return attributes_; }
Expand Down
134 changes: 134 additions & 0 deletions onnxruntime/core/graph/contrib_ops/contrib_defs.cc
Original file line number Diff line number Diff line change
Expand Up @@ -2679,6 +2679,140 @@ It's an extension of Gelu. It takes the sum of input A and bias input B as the i
updateOutputElemType(ctx, 0, ONNX_NAMESPACE::TensorProto::BOOL);
});

// Doc string for the OptionalConstruct contrib op (opset 1, kMSDomain).
// Note: stray quote characters from the original copy-paste removed.
static const char* OptionalConstruct_ver1_doc = R"DOC(
Construct an optional type containing either an empty optional of a certain type specified by the attribute,
or an optional type containing the 'input' element.
)DOC";

// OptionalConstruct: wraps the (optional) input element into an optional-type
// value, or — when no input is given — produces an empty optional whose element
// type is taken from the 'type' attribute. Exactly one of {input, 'type'} must
// be provided; type/shape inference fails otherwise.
ONNX_CONTRIB_OPERATOR_SCHEMA(OptionalConstruct)
    .SetDomain(kMSDomain)
    .SinceVersion(1)
    .SetDoc(OptionalConstruct_ver1_doc)
    .Input(0, "input", "The input element.", "T", OpSchema::Optional)
    .Attr("type", "Type of the element in the optional output", AttributeProto::TYPE_PROTO, OPTIONAL_VALUE)
    .Output(0, "output", "The optional output enclosing the input element.", "O")
    .TypeConstraint(
        "T",
        {"tensor(float)",
         "seq(tensor(float))"},
        "Constrains input type to all tensor and sequence types.")
    .TypeConstraint(
        "O",
        // NOTE(review): the duplicated leading "optional(tensor(float))" /
        // "optional(seq(tensor(float)))" entries were removed; the float types
        // already appear in their canonical positions below.
        {"optional(tensor(uint8))", "optional(tensor(uint16))",
         "optional(tensor(uint32))", "optional(tensor(uint64))", "optional(tensor(int8))",
         "optional(tensor(int16))", "optional(tensor(int32))", "optional(tensor(int64))",
         "optional(tensor(float16))", "optional(tensor(float))", "optional(tensor(double))",
         "optional(tensor(string))", "optional(tensor(bool))", "optional(tensor(complex64))",
         "optional(tensor(complex128))",
         "optional(seq(tensor(uint8)))", "optional(seq(tensor(uint16)))",
         "optional(seq(tensor(uint32)))", "optional(seq(tensor(uint64)))", "optional(seq(tensor(int8)))",
         "optional(seq(tensor(int16)))", "optional(seq(tensor(int32)))", "optional(seq(tensor(int64)))",
         "optional(seq(tensor(float16)))", "optional(seq(tensor(float)))", "optional(seq(tensor(double)))",
         "optional(seq(tensor(string)))", "optional(seq(tensor(bool)))", "optional(seq(tensor(complex64)))",
         "optional(seq(tensor(complex128)))"},
        "Constrains output type to all optional tensor or optional sequence types.")
    .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
      // Exactly one output must be present.
      if (ctx.getNumOutputs() != 1) {
        fail_type_inference("OptionalConstruct is expected to have an output.");
      }

      const size_t numInputs = ctx.getNumInputs();
      const auto* attr_proto = ctx.getAttribute("type");

      if ((numInputs == 0) && (attr_proto != nullptr)) {
        // Empty optional: the element type comes from the 'type' attribute.
        if (!attr_proto->has_tp())
          fail_type_inference(
              "Attribute 'type' should be a TypeProto and it should specify a type.");
        const auto& attr_tp = attr_proto->tp();  // reference — avoid copying the TypeProto
        ctx.getOutputType(0)
            ->mutable_optional_type()
            ->mutable_elem_type()
            ->CopyFrom(attr_tp);
      } else if (numInputs == 1) {
        // Non-empty optional: the element type mirrors the input's type.
        const auto* input_type = ctx.getInputType(0);
        if (input_type == nullptr) {
          fail_type_inference("Input type is null. Type information is expected for the input.");
        }
        ctx.getOutputType(0)
            ->mutable_optional_type()
            ->mutable_elem_type()
            ->CopyFrom(*input_type);
      } else {
        fail_type_inference("OptionalConstruct is expected to have either an input or the type attribute set.");
      }
    });

// Doc string for the OptionalHasElement contrib op (opset 1, kMSDomain).
static const char* OptionalHasElement_ver1_doc = R"DOC(
Returns true if the optional-type input contains an element. If it is an empty optional-type, this op returns false.
)DOC";

// OptionalHasElement: unary predicate on an optional-type value. Produces a
// scalar boolean tensor — true iff the optional contains an element.
ONNX_CONTRIB_OPERATOR_SCHEMA(OptionalHasElement)
    .SetDomain(kMSDomain)
    .SinceVersion(1)
    .SetDoc(OptionalHasElement_ver1_doc)
    .Input(0, "input", "The optional input.", "O")
    .Output(0, "output", "A scalar boolean tensor. If true, it indicates that optional-type input contains an element. Otherwise, it is empty.", "B")
    .TypeConstraint(
        "O",
        {"optional(tensor(float))",
         "optional(seq(tensor(float)))"},
        "Constrains input type to optional tensor and optional sequence types.")
    .TypeConstraint(
        "B",
        {"tensor(bool)"},
        "Constrains output to a boolean tensor.")
    .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
      if (ctx.getNumInputs() != 1) {
        fail_type_inference("OptionalHasElement is expected to have 1 input.");
      }
      if (ctx.getNumOutputs() != 1) {
        fail_type_inference("OptionalHasElement is expected to have 1 output.");
      }
      // The output is always a rank-0 (scalar) boolean tensor, regardless of
      // what the optional input holds.
      auto* output_tensor_type = ctx.getOutputType(0)->mutable_tensor_type();
      output_tensor_type->set_elem_type(TensorProto::BOOL);
      output_tensor_type->mutable_shape()->Clear();
    });

// Doc string for the OptionalGetElement contrib op (opset 1, kMSDomain).
// Note: stray quote/apostrophe characters from the original copy-paste removed.
static const char* OptionalGetElement_ver1_doc = R"DOC(
Outputs the element in the optional-type input. It is an error if the input value does not have an element
and the behavior is undefined in this case.
)DOC";

// OptionalGetElement: unwraps an optional-type value, outputting the contained
// element. Behavior is undefined when the optional is empty (see doc string).
ONNX_CONTRIB_OPERATOR_SCHEMA(OptionalGetElement)
    .SetDomain(kMSDomain)
    .SinceVersion(1)
    .SetDoc(OptionalGetElement_ver1_doc)
    .Input(0, "input", "The optional input.", "O")
    .Output(0, "output", "Output element in the optional input.", "V")
    .TypeConstraint(
        "O",
        {"optional(tensor(float))",
         "optional(seq(tensor(float)))"},
        "Constrains input type to optional tensor and optional sequence types.")
    .TypeConstraint(
        "V",
        {"tensor(float)",
         "seq(tensor(float))"},
        "Constrain output type to all tensor or sequence types.")
    .TypeAndShapeInferenceFunction([](InferenceContext& ctx) {
      if (ctx.getNumInputs() != 1) {
        fail_type_inference("OptionalGetElement must have an input element.");
      }
      const auto* input_type = ctx.getInputType(0);
      if (input_type == nullptr) {
        fail_type_inference("Input type is null. Input must have Type information.");
      }
      // The input's optional type must carry a fully-specified element type,
      // since the output type is copied directly from it.
      if (!input_type->has_optional_type() || !input_type->optional_type().has_elem_type()) {
        fail_type_inference("Input must be an optional-type value containing an element with type information.");
      }
      ctx.getOutputType(0)
          ->CopyFrom(input_type->optional_type().elem_type());
    });

#ifndef _OPSCHEMA_LIB_
// Register the NCHWc schemas if supported by the platform.
if (MlasNchwcGetBlockSize() > 1) {
Expand Down
3 changes: 3 additions & 0 deletions onnxruntime/core/graph/graph.cc
Original file line number Diff line number Diff line change
Expand Up @@ -350,6 +350,7 @@ common::Status NodeArg::UpdateTypeAndShape(const ONNX_NAMESPACE::TypeProto& inpu
} break;
case TypeProto::kSequenceType:
case TypeProto::kMapType:
case TypeProto::kOptionalType:
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

same comment as above

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is needed for this PR.

case TypeProto::kOpaqueType:
case TypeProto::VALUE_NOT_SET:
break;
Expand Down Expand Up @@ -799,12 +800,14 @@ ADD_BASIC_ATTR_IMPL(int64_t, AttributeProto_AttributeType::AttributeProto_Attrib
ADD_BASIC_ATTR_IMPL(std::string, AttributeProto_AttributeType::AttributeProto_AttributeType_STRING, s)
ADD_ATTR_IMPL(TensorProto, AttributeProto_AttributeType::AttributeProto_AttributeType_TENSOR, t)
ADD_ATTR_IMPL(SparseTensorProto, AttributeProto_AttributeType::AttributeProto_AttributeType_SPARSE_TENSOR, sparse_tensor)
ADD_ATTR_IMPL(TypeProto, AttributeProto_AttributeType::AttributeProto_AttributeType_TYPE_PROTO, tp)
ADD_LIST_ATTR_IMPL(float, AttributeProto_AttributeType::AttributeProto_AttributeType_FLOATS, floats)
ADD_LIST_ATTR_IMPL(int64_t, AttributeProto_AttributeType::AttributeProto_AttributeType_INTS, ints)
ADD_LIST_ATTR_IMPL(std::string, AttributeProto_AttributeType::AttributeProto_AttributeType_STRINGS, strings)
ADD_LIST_ATTR_IMPL(TensorProto, AttributeProto_AttributeType::AttributeProto_AttributeType_TENSORS, tensors)
ADD_LIST_ATTR_IMPL(GraphProto, AttributeProto_AttributeType::AttributeProto_AttributeType_GRAPHS, graphs)
ADD_LIST_ATTR_IMPL(SparseTensorProto, AttributeProto_AttributeType::AttributeProto_AttributeType_SPARSE_TENSORS, sparse_tensors)
ADD_LIST_ATTR_IMPL(TypeProto, AttributeProto_AttributeType::AttributeProto_AttributeType_TYPE_PROTOS, type_protos)

#if !defined(ORT_MINIMAL_BUILD)
bool Node::ClearAttribute(const std::string& attr_name) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -195,9 +195,11 @@ bool AreEqual(const ONNX_NAMESPACE::AttributeProto& lhs, const ONNX_NAMESPACE::A
case onnx::AttributeProto_AttributeType_TENSOR:
case onnx::AttributeProto_AttributeType_GRAPH:
case onnx::AttributeProto_AttributeType_SPARSE_TENSOR:
case onnx::AttributeProto_AttributeType_TYPE_PROTO:
case onnx::AttributeProto_AttributeType_TENSORS:
case onnx::AttributeProto_AttributeType_GRAPHS:
case onnx::AttributeProto_AttributeType_SPARSE_TENSORS:
case onnx::AttributeProto_AttributeType_TYPE_PROTOS:
case onnx::AttributeProto_AttributeType_UNDEFINED:
return false; // Don't support these attributes for now; corresponding nodes will be considered distinct.
}
Expand Down Expand Up @@ -233,9 +235,11 @@ std::size_t GetAttributeHash(const ONNX_NAMESPACE::AttributeProto& attr) {
case onnx::AttributeProto_AttributeType_TENSOR:
case onnx::AttributeProto_AttributeType_GRAPH:
case onnx::AttributeProto_AttributeType_SPARSE_TENSOR:
case onnx::AttributeProto_AttributeType_TYPE_PROTO:
case onnx::AttributeProto_AttributeType_TENSORS:
case onnx::AttributeProto_AttributeType_GRAPHS:
case onnx::AttributeProto_AttributeType_SPARSE_TENSORS:
case onnx::AttributeProto_AttributeType_TYPE_PROTOS:
case onnx::AttributeProto_AttributeType_UNDEFINED:
break;
}
Expand Down
151 changes: 151 additions & 0 deletions onnxruntime/test/contrib_ops/optional_ops_shape_inference_test.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,151 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "test/providers/provider_test_utils.h"
#include "onnx/shape_inference/implementation.h"
#include "onnx/checker.h"
#include "test/providers/cpu/tensor/shape_inference_test_helper.h"

namespace onnxruntime {
namespace test {
using namespace ONNX_NAMESPACE;

// Shape inference for OptionalConstruct with no input: the 'type' attribute
// (tensor(float)) determines the element type of the empty optional output.
TEST(ShapeInferenceTests, optional_empty_tensor) {
  ONNX_NAMESPACE::AttributeProto type_attr;
  type_attr.set_name("type");
  type_attr.set_type(ONNX_NAMESPACE::AttributeProto::TYPE_PROTO);
  type_attr.mutable_tp()->mutable_tensor_type()->set_elem_type(TensorProto_DataType::TensorProto_DataType_FLOAT);
  std::vector<ONNX_NAMESPACE::AttributeProto> attributes = {type_attr};

  ONNX_NAMESPACE::ValueInfoProto expected_output;
  expected_output.set_name("Y");
  expected_output.mutable_type()
      ->mutable_optional_type()
      ->mutable_elem_type()
      ->mutable_tensor_type()
      ->set_elem_type(ONNX_NAMESPACE::TensorProto_DataType_FLOAT);

  TestShapeInference("OptionalConstruct", kMSDomain, 1, 6, {}, attributes, expected_output);
}

// Shape inference for OptionalConstruct with no input: the 'type' attribute
// (seq(tensor(float))) determines the element type of the empty optional output.
TEST(ShapeInferenceTests, optional_empty_sequence) {
  ONNX_NAMESPACE::AttributeProto type_attr;
  type_attr.set_name("type");
  type_attr.set_type(ONNX_NAMESPACE::AttributeProto::TYPE_PROTO);
  type_attr.mutable_tp()
      ->mutable_sequence_type()
      ->mutable_elem_type()
      ->mutable_tensor_type()
      ->set_elem_type(TensorProto_DataType::TensorProto_DataType_FLOAT);
  std::vector<ONNX_NAMESPACE::AttributeProto> attributes = {type_attr};

  ONNX_NAMESPACE::ValueInfoProto expected_output;
  expected_output.set_name("Y");
  expected_output.mutable_type()
      ->mutable_optional_type()
      ->mutable_elem_type()
      ->mutable_sequence_type()
      ->mutable_elem_type()
      ->mutable_tensor_type()
      ->set_elem_type(ONNX_NAMESPACE::TensorProto_DataType_FLOAT);

  TestShapeInference("OptionalConstruct", kMSDomain, 1, 6, {}, attributes, expected_output);
}

// Shape inference for OptionalConstruct with a tensor input: the output is
// optional(tensor(float)) carrying the input's shape.
// Fix: test suite renamed from TriluContribOpTest (copy-paste leftover) to
// ShapeInferenceTests, matching the other tests in this file.
TEST(ShapeInferenceTests, optional_construct_tensor) {
  ONNX_NAMESPACE::ValueInfoProto input;
  input.set_name("X");
  auto* tensor_type = input.mutable_type()->mutable_tensor_type();
  tensor_type->set_elem_type(ONNX_NAMESPACE::TensorProto_DataType_FLOAT);
  std::vector<int64_t> shape = {2, 3};
  auto* value_info_shape = tensor_type->mutable_shape();
  for (int64_t dim_value : shape) {
    value_info_shape->add_dim()->set_dim_value(dim_value);
  }

  // Expected output: the same tensor type/shape, wrapped in optional(...).
  ONNX_NAMESPACE::ValueInfoProto output;
  output.set_name("Y");
  auto* output_type = output.mutable_type()->mutable_optional_type()->mutable_elem_type()->mutable_tensor_type();
  output_type->set_elem_type(ONNX_NAMESPACE::TensorProto_DataType_FLOAT);
  auto* output_value_info_shape = output_type->mutable_shape();
  for (int64_t dim_value : shape) {
    output_value_info_shape->add_dim()->set_dim_value(dim_value);
  }

  TestShapeInference("OptionalConstruct", kMSDomain, 1, 6, {input}, {}, output);
}

// Shape inference for OptionalConstruct with a sequence input: the output is
// optional(seq(tensor(float))) carrying the input tensor shape.
// Fix: test suite renamed from TriluContribOpTest (copy-paste leftover) to
// ShapeInferenceTests, matching the other tests in this file.
TEST(ShapeInferenceTests, optional_construct_sequence) {
  ONNX_NAMESPACE::ValueInfoProto input;
  input.set_name("X");
  auto* tensor_type = input.mutable_type()->mutable_sequence_type()->mutable_elem_type()->mutable_tensor_type();
  tensor_type->set_elem_type(ONNX_NAMESPACE::TensorProto_DataType_FLOAT);
  std::vector<int64_t> shape = {2, 3};
  auto* value_info_shape = tensor_type->mutable_shape();
  for (int64_t dim_value : shape) {
    value_info_shape->add_dim()->set_dim_value(dim_value);
  }

  // Expected output: the same sequence type, wrapped in optional(...).
  ONNX_NAMESPACE::ValueInfoProto output;
  output.set_name("Y");
  auto* output_type = output.mutable_type()->mutable_optional_type()->mutable_elem_type()->mutable_sequence_type()->mutable_elem_type()->mutable_tensor_type();
  output_type->set_elem_type(ONNX_NAMESPACE::TensorProto_DataType_FLOAT);
  auto* output_value_info_shape = output_type->mutable_shape();
  for (int64_t dim_value : shape) {
    output_value_info_shape->add_dim()->set_dim_value(dim_value);
  }

  TestShapeInference("OptionalConstruct", kMSDomain, 1, 6, {input}, {}, output);
}

// Shape inference for OptionalHasElement: input optional(tensor(float)),
// expected output is a scalar boolean tensor.
// Fix: test suite renamed from TriluContribOpTest (copy-paste leftover) to
// ShapeInferenceTests, matching the other tests in this file.
TEST(ShapeInferenceTests, optional_has_element) {
  ONNX_NAMESPACE::ValueInfoProto input;
  input.set_name("X");
  auto* input_type = input.mutable_type()->mutable_optional_type()->mutable_elem_type()->mutable_tensor_type();
  input_type->set_elem_type(ONNX_NAMESPACE::TensorProto_DataType_FLOAT);

  ONNX_NAMESPACE::ValueInfoProto output;
  output.set_name("Y");
  auto* output_type = output.mutable_type()->mutable_tensor_type();
  output_type->set_elem_type(ONNX_NAMESPACE::TensorProto_DataType_BOOL);

  TestShapeInference("OptionalHasElement", kMSDomain, 1, 6, {input}, {}, output);
}

// Shape inference for OptionalGetElement: unwrapping optional(tensor(float))
// yields the inner tensor type with its shape preserved.
// Fix: test suite renamed from TriluContribOpTest (copy-paste leftover) to
// ShapeInferenceTests, matching the other tests in this file.
TEST(ShapeInferenceTests, optional_get_tensor) {
  ONNX_NAMESPACE::ValueInfoProto input;
  input.set_name("X");
  auto* input_type = input.mutable_type()->mutable_optional_type()->mutable_elem_type()->mutable_tensor_type();
  input_type->set_elem_type(ONNX_NAMESPACE::TensorProto_DataType_FLOAT);
  auto* input_value_info_shape = input_type->mutable_shape();
  std::vector<int64_t> shape = {2, 3};
  for (int64_t dim_value : shape) {
    input_value_info_shape->add_dim()->set_dim_value(dim_value);
  }

  // Expected output: the optional wrapper is stripped; tensor shape kept.
  ONNX_NAMESPACE::ValueInfoProto output;
  output.set_name("Y");
  auto* tensor_type = output.mutable_type()->mutable_tensor_type();
  tensor_type->set_elem_type(ONNX_NAMESPACE::TensorProto_DataType_FLOAT);
  auto* value_info_shape = tensor_type->mutable_shape();
  for (int64_t dim_value : shape) {
    value_info_shape->add_dim()->set_dim_value(dim_value);
  }

  TestShapeInference("OptionalGetElement", kMSDomain, 1, 6, {input}, {}, output);
}

// Shape inference for OptionalGetElement: unwrapping optional(seq(tensor(float)))
// yields the inner sequence type with its tensor shape preserved.
// Fix: test suite renamed from TriluContribOpTest (copy-paste leftover) to
// ShapeInferenceTests, matching the other tests in this file.
TEST(ShapeInferenceTests, optional_get_sequence) {
  ONNX_NAMESPACE::ValueInfoProto input;
  input.set_name("X");
  auto* tensor_type = input.mutable_type()->mutable_optional_type()->mutable_elem_type()->mutable_sequence_type()->mutable_elem_type()->mutable_tensor_type();
  tensor_type->set_elem_type(ONNX_NAMESPACE::TensorProto_DataType_FLOAT);
  std::vector<int64_t> shape = {2, 3};
  auto* value_info_shape = tensor_type->mutable_shape();
  for (int64_t dim_value : shape) {
    value_info_shape->add_dim()->set_dim_value(dim_value);
  }

  // Expected output: the optional wrapper is stripped; sequence type kept.
  ONNX_NAMESPACE::ValueInfoProto output;
  output.set_name("Y");
  auto* output_type = output.mutable_type()->mutable_sequence_type()->mutable_elem_type()->mutable_tensor_type();
  output_type->set_elem_type(ONNX_NAMESPACE::TensorProto_DataType_FLOAT);
  auto* output_value_info_shape = output_type->mutable_shape();
  for (int64_t dim_value : shape) {
    output_value_info_shape->add_dim()->set_dim_value(dim_value);
  }

  TestShapeInference("OptionalGetElement", kMSDomain, 1, 6, {input}, {}, output);
}

} // namespace test
} // namespace onnxruntime
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ mypy
pytest
setuptools>=41.4.0
wheel
git+http://github.com/onnx/onnx.git@adeb09b3af63939c507355f1ef6bca9c32e7e244#egg=onnx
git+http://github.com/onnx/onnx.git@04971f7d26aad7bbc71e2d92433631f985cb96c6#egg=onnx
protobuf
sympy==1.1.1
flake8
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ mypy
pytest
setuptools>=41.4.0
wheel
git+http://github.com/onnx/onnx.git@adeb09b3af63939c507355f1ef6bca9c32e7e244#egg=onnx
git+http://github.com/onnx/onnx.git@04971f7d26aad7bbc71e2d92433631f985cb96c6#egg=onnx
argparse
sympy==1.1.1
flake8
Expand Down