diff --git a/onnxscript/__init__.py b/onnxscript/__init__.py index 61d9164a0c..403eccb140 100644 --- a/onnxscript/__init__.py +++ b/onnxscript/__init__.py @@ -26,10 +26,10 @@ opset16, opset17, opset18, + opset_ai_onnx_ml1, + opset_ai_onnx_ml2, + opset_ai_onnx_ml3, default_opset, - onnxml1, - onnxml2, - onnxml3, ) from .onnx_types import ( @@ -99,8 +99,8 @@ "opset16", "opset17", "opset18", + "opset_ai_onnx_ml1", + "opset_ai_onnx_ml2", + "opset_ai_onnx_ml3", "default_opset", - "onnxml1", - "onnxml2", - "onnxml3", ] diff --git a/onnxscript/onnx_opset.py b/onnxscript/onnx_opset.py deleted file mode 100644 index 5eac3c2f42..0000000000 --- a/onnxscript/onnx_opset.py +++ /dev/null @@ -1,39 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. -# -------------------------------------------------------------------------- -from __future__ import annotations - -from onnx.defs import onnx_opset_version - -from onnxscript import values - -if onnx_opset_version() < 14: - raise ImportError( - f"onnx-script requires onnx opset >= 14 but {onnx_opset_version()} is detected." - ) - -opset1 = values.Opset("", 1) -opset2 = values.Opset("", 2) -opset3 = values.Opset("", 3) -opset4 = values.Opset("", 4) -opset5 = values.Opset("", 5) -opset6 = values.Opset("", 6) -opset7 = values.Opset("", 7) -opset8 = values.Opset("", 8) -opset9 = values.Opset("", 9) -opset10 = values.Opset("", 10) -opset11 = values.Opset("", 11) -opset12 = values.Opset("", 12) -opset13 = values.Opset("", 13) -opset14 = values.Opset("", 14) -opset15 = values.Opset("", 15) -opset16 = values.Opset("", 16) -opset17 = values.Opset("", 17) -opset18 = values.Opset("", 18) - -default_opset = values.Opset("", onnx_opset_version()) - -onnxml1 = values.Opset("ai.onnx.ml", 1) -onnxml2 = values.Opset("ai.onnx.ml", 2) -onnxml3 = values.Opset("ai.onnx.ml", 3) diff --git a/onnxscript/onnx_opset/__init__.py b/onnxscript/onnx_opset/__init__.py new file mode 100644 index 0000000000..4a7729dec0 --- /dev/null +++ b/onnxscript/onnx_opset/__init__.py @@ -0,0 +1,191 @@ +# -------------------------------------------------------------------------- +# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ +# ⚙️ Generated by 'python -m opgen' +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+# -------------------------------------------------------------------------- +# flake8: noqa +# mypy: disable-error-code=override +# pylint: disable=W0221,W0222,W0237,W0246,R0901 +# -------------------------------------------------------------------------- + +from onnx.defs import onnx_opset_version + +from onnxscript.onnx_opset._impl.opset1 import Opset1 +from onnxscript.onnx_opset._impl.opset2 import Opset2 +from onnxscript.onnx_opset._impl.opset3 import Opset3 +from onnxscript.onnx_opset._impl.opset4 import Opset4 +from onnxscript.onnx_opset._impl.opset5 import Opset5 +from onnxscript.onnx_opset._impl.opset6 import Opset6 +from onnxscript.onnx_opset._impl.opset7 import Opset7 +from onnxscript.onnx_opset._impl.opset8 import Opset8 +from onnxscript.onnx_opset._impl.opset9 import Opset9 +from onnxscript.onnx_opset._impl.opset10 import Opset10 +from onnxscript.onnx_opset._impl.opset11 import Opset11 +from onnxscript.onnx_opset._impl.opset12 import Opset12 +from onnxscript.onnx_opset._impl.opset13 import Opset13 +from onnxscript.onnx_opset._impl.opset14 import Opset14 +from onnxscript.onnx_opset._impl.opset15 import Opset15 +from onnxscript.onnx_opset._impl.opset16 import Opset16 +from onnxscript.onnx_opset._impl.opset17 import Opset17 +from onnxscript.onnx_opset._impl.opset18 import Opset18 +from onnxscript.onnx_opset._impl.opset_ai_onnx_ml1 import Opset_ai_onnx_ml1 +from onnxscript.onnx_opset._impl.opset_ai_onnx_ml2 import Opset_ai_onnx_ml2 +from onnxscript.onnx_opset._impl.opset_ai_onnx_ml3 import Opset_ai_onnx_ml3 +from onnxscript.onnx_opset._impl.opset_ai_onnx_preview_training1 import ( + Opset_ai_onnx_preview_training1, +) + +__all__ = [ + "default_opset", + "all_opsets", + "opset1", + "opset2", + "opset3", + "opset4", + "opset5", + "opset6", + "opset7", + "opset8", + "opset9", + "opset10", + "opset11", + "opset12", + "opset13", + "opset14", + "opset15", + "opset16", + "opset17", + "opset18", + "opset_ai_onnx_ml1", + "opset_ai_onnx_ml2", + "opset_ai_onnx_ml3", + "opset_ai_onnx_preview_training1", +] + + +if onnx_opset_version() < 14: + raise ImportError( + f"ONNX Script requires ONNX opset >= 14 but {onnx_opset_version()} is detected." 
+ ) + + +opset1 = Opset1() +opset2 = Opset2() +opset3 = Opset3() +opset4 = Opset4() +opset5 = Opset5() +opset6 = Opset6() +opset7 = Opset7() +opset8 = Opset8() +opset9 = Opset9() +opset10 = Opset10() +opset11 = Opset11() +opset12 = Opset12() +opset13 = Opset13() +opset14 = Opset14() +opset15 = Opset15() +opset16 = Opset16() +opset17 = Opset17() +opset18 = Opset18() +opset_ai_onnx_ml1 = Opset_ai_onnx_ml1() +opset_ai_onnx_ml2 = Opset_ai_onnx_ml2() +opset_ai_onnx_ml3 = Opset_ai_onnx_ml3() +opset_ai_onnx_preview_training1 = Opset_ai_onnx_preview_training1() +all_opsets = { + ( + "", + 1, + ): opset1, + ( + "", + 2, + ): opset2, + ( + "", + 3, + ): opset3, + ( + "", + 4, + ): opset4, + ( + "", + 5, + ): opset5, + ( + "", + 6, + ): opset6, + ( + "", + 7, + ): opset7, + ( + "", + 8, + ): opset8, + ( + "", + 9, + ): opset9, + ( + "", + 10, + ): opset10, + ( + "", + 11, + ): opset11, + ( + "", + 12, + ): opset12, + ( + "", + 13, + ): opset13, + ( + "", + 14, + ): opset14, + ( + "", + 15, + ): opset15, + ( + "", + 16, + ): opset16, + ( + "", + 17, + ): opset17, + ( + "", + 18, + ): opset18, + ( + "ai.onnx.ml", + 1, + ): opset_ai_onnx_ml1, + ( + "ai.onnx.ml", + 2, + ): opset_ai_onnx_ml2, + ( + "ai.onnx.ml", + 3, + ): opset_ai_onnx_ml3, + ( + "ai.onnx.preview.training", + 1, + ): opset_ai_onnx_preview_training1, +} +default_opset: Opset14 = all_opsets[ + ( + "", + onnx_opset_version(), + ) +] # type: ignore diff --git a/onnxscript/onnx_opset/_impl/opset1.py b/onnxscript/onnx_opset/_impl/opset1.py new file mode 100644 index 0000000000..6c5e225da2 --- /dev/null +++ b/onnxscript/onnx_opset/_impl/opset1.py @@ -0,0 +1,4353 @@ +# -------------------------------------------------------------------------- +# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ +# ⚙️ Generated by 'python -m opgen' +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +# flake8: noqa +# mypy: disable-error-code=override +# pylint: disable=W0221,W0222,W0237,W0246,R0901 +# -------------------------------------------------------------------------- + +from typing import Callable, Optional, Sequence, Tuple, Union + +from onnx import GraphProto, TensorProto +from onnx.defs import get_schema + +from onnxscript.onnx_types import ( + BOOL, + COMPLEX64, + COMPLEX128, + DOUBLE, + FLOAT, + FLOAT16, + INT8, + INT16, + INT32, + INT64, + STRING, + UINT8, + UINT16, + UINT32, + UINT64, +) +from onnxscript.values import Op, Opset + + +class Opset1(Opset): + def __new__(cls): + return Opset.__new__(cls, "", 1) + + def __init__(self): + super().__init__() + + def Abs( + self, X: Union[DOUBLE, FLOAT, FLOAT16], consumed_inputs: Optional[Sequence[int]] = None + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Abs(1)](https://onnx.ai/onnx/operators/onnx__Abs.html#abs-1 "Online Documentation") + + + Absolute takes one input data (Tensor) and produces one output data + (Tensor) where the absolute is, y = abs(x), is applied to + the tensor elementwise. + + + Args: + X: Input tensor + + consumed_inputs: legacy optimization attribute. 
+ """ + + schema = get_schema("Abs", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Abs", schema) + return op(*self._prepare_inputs(schema, X), consumed_inputs=consumed_inputs) + + def Add( + self, + A: Union[DOUBLE, FLOAT, FLOAT16], + B: Union[DOUBLE, FLOAT, FLOAT16], + axis: Optional[int] = None, + broadcast: int = 0, + consumed_inputs: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Add(1)](https://onnx.ai/onnx/operators/onnx__Add.html#add-1 "Online Documentation") + + + Performs element-wise binary addition (with limited broadcast support). + + If necessary the right-hand-side argument will be broadcasted to match the + shape of left-hand-side argument. When broadcasting is specified, the second + tensor can either be of element size 1 (including a scalar tensor and any + tensor with rank equal to or smaller than the first tensor), or having its + shape as a contiguous subset of the first tensor's shape. The starting of the + mutually equal shape is specified by the argument "axis", and if it is not set, + suffix matching is assumed. 1-dim expansion doesn't work yet. + + For example, the following tensor shapes are supported (with broadcast=1): + + shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor + shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor + shape(A) = (2, 3, 4, 5), shape(B) = (5,) + shape(A) = (2, 3, 4, 5), shape(B) = (4, 5) + shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1 + shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0 + + Attribute `broadcast=1` needs to be passed to enable broadcasting. + + + Args: + A: First operand, should share the type with the second operand. + + B: Second operand. With broadcasting can be of smaller size than A. If + broadcasting is disabled it should be of the same size. + + axis: If set, defines the broadcast dimensions. See doc for details. + + broadcast: Pass 1 to enable broadcasting + + consumed_inputs: legacy optimization attribute. + """ + + schema = get_schema("Add", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Add", schema) + return op( + *self._prepare_inputs(schema, A, B), + axis=axis, + broadcast=broadcast, + consumed_inputs=consumed_inputs, + ) + + def And(self, A: BOOL, B: BOOL, axis: Optional[int] = None, broadcast: int = 0) -> BOOL: + r"""[🌐 And(1)](https://onnx.ai/onnx/operators/onnx__And.html#and-1 "Online Documentation") + + + Returns the tensor resulted from performing the `and` logical operation + elementwise on the input tensors `A` and `B`. + + If broadcasting is enabled, the right-hand-side argument will be broadcasted + to match the shape of left-hand-side argument. See the doc of `Add` for a + detailed description of the broadcasting rules. + + + Args: + A: Left input tensor for the logical operator. + + B: Right input tensor for the logical operator. + + axis: If set, defines the broadcast dimensions. + + broadcast: Enable broadcasting + """ + + schema = get_schema("And", 1, "") + op: Callable[..., BOOL] = Op(self, "And", schema) + return op(*self._prepare_inputs(schema, A, B), axis=axis, broadcast=broadcast) + + def ArgMax( + self, + data: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + axis: int = 0, + keepdims: int = 1, + ) -> INT64: + r"""[🌐 ArgMax(1)](https://onnx.ai/onnx/operators/onnx__ArgMax.html#argmax-1 "Online Documentation") + + + Computes the indices of the max elements of the input tensor's element along the + provided axis. 
The resulting tensor has the same rank as the input if keepdims equals 1.
+ If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.
+ The type of the output tensor is integer.
+
+ Args:
+ data: An input tensor.
+
+ axis: The axis in which to compute the arg indices.
+
+ keepdims: Keep the reduced dimension or not, default 1 means keep reduced
+ dimension.
+ """
+
+ schema = get_schema("ArgMax", 1, "")
+ op: Callable[..., INT64] = Op(self, "ArgMax", schema)
+ return op(*self._prepare_inputs(schema, data), axis=axis, keepdims=keepdims)
+
+ def ArgMin(
+ self,
+ data: Union[
+ DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8
+ ],
+ axis: int = 0,
+ keepdims: int = 1,
+ ) -> INT64:
+ r"""[🌐 ArgMin(1)](https://onnx.ai/onnx/operators/onnx__ArgMin.html#argmin-1 "Online Documentation")
+
+
+ Computes the indices of the min elements of the input tensor's element along the
+ provided axis. The resulting tensor has the same rank as the input if keepdims equals 1.
+ If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.
+ The type of the output tensor is integer.
+
+ Args:
+ data: An input tensor.
+
+ axis: The axis in which to compute the arg indices.
+
+ keepdims: Keep the reduced dimension or not, default 1 means keep reduced
+ dimension.
+ """
+
+ schema = get_schema("ArgMin", 1, "")
+ op: Callable[..., INT64] = Op(self, "ArgMin", schema)
+ return op(*self._prepare_inputs(schema, data), axis=axis, keepdims=keepdims)
+
+ def AveragePool(
+ self,
+ X: Union[DOUBLE, FLOAT, FLOAT16],
+ auto_pad: str = "NOTSET",
+ kernel_shape: Optional[Sequence[int]] = None,
+ pads: Optional[Sequence[int]] = None,
+ strides: Optional[Sequence[int]] = None,
+ ) -> Union[DOUBLE, FLOAT, FLOAT16]:
+ r"""[🌐 AveragePool(1)](https://onnx.ai/onnx/operators/onnx__AveragePool.html#averagepool-1 "Online Documentation")
+
+
+ AveragePool consumes an input tensor X and applies average pooling across
+ the tensor according to kernel sizes, stride sizes, and pad lengths.
+ Average pooling consists of computing the average on all values of a
+ subset of the input tensor according to the kernel size and downsampling the
+ data into the output tensor Y for further processing. The output spatial shape will be following:
+ ```
+ output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)
+
+ * pad_shape[i] is sum of pads along axis i
+ ```
+
+ `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:
+ ```
+ VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])
+ SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])
+ ```
+ And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:
+ ```
+ pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]
+ ```
+ The output of each pooling window is divided by the number of elements excluding pad.
+
+
+ Args:
+ X: Input data tensor from the previous operator; dimensions for image case
+ are (N x C x H x W), where N is the batch size, C is the number of
+ channels, and H and W are the height and the width of the data. For non
+ image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn),
+ where N is the batch size.
Optionally, if dimension denotation is in + effect, the operation expects the input data tensor to arrive with the + dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, + DATA_FEATURE ...]. + + auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. + Where default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial + size match the input.In case of odd number add the extra padding at the + end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no + padding. + + kernel_shape: The size of the kernel along each axis. + + pads: Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. `pads` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis `i` and xi_end, the number of pixels + added at the end of axis `i`. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. + + strides: Stride along each spatial axis. + """ + + schema = get_schema("AveragePool", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "AveragePool", schema) + return op( + *self._prepare_inputs(schema, X), + auto_pad=auto_pad, + kernel_shape=kernel_shape, + pads=pads, + strides=strides, + ) + + def BatchNormalization( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + scale: Union[DOUBLE, FLOAT, FLOAT16], + B: Union[DOUBLE, FLOAT, FLOAT16], + mean: Union[DOUBLE, FLOAT, FLOAT16], + var: Union[DOUBLE, FLOAT, FLOAT16], + consumed_inputs: Optional[Sequence[int]] = None, + epsilon: float = 9.999999747378752e-06, + is_test: int = 0, + momentum: float = 0.8999999761581421, + spatial: int = 1, + ) -> Tuple[ + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + ]: + r"""[🌐 BatchNormalization(1)](https://onnx.ai/onnx/operators/onnx__BatchNormalization.html#batchnormalization-1 "Online Documentation") + + + Carries out batch normalization as described in the paper + https://arxiv.org/abs/1502.03167. Depending on the mode it is being run, + there are multiple cases for the number of outputs, which we list below: + + Output case #1: Y, mean, var, saved_mean, saved_var (training mode) + Output case #2: Y (test mode) + + + Args: + X: The input 4-dimensional tensor of shape NCHW. + + scale: The scale as a 1-dimensional tensor of size C to be applied to the + output. + + B: The bias as a 1-dimensional tensor of size C to be applied to the output. + + mean: The running mean (training) or the estimated mean (testing) as a + 1-dimensional tensor of size C. + + var: The running variance (training) or the estimated variance (testing) as + a 1-dimensional tensor of size C. + + consumed_inputs: legacy optimization attribute. + + epsilon: The epsilon value to use to avoid division by zero, default is + 1e-5f. + + is_test: If set to nonzero, run spatial batch normalization in test mode, + default is 0. + + momentum: Factor used in computing the running mean and variance.e.g., + running_mean = running_mean * momentum + mean * (1 - momentum), default + is 0.9f. 
+
+ spatial: If true, compute the mean and variance across all spatial elements.
+ If false, compute the mean and variance per feature. Default is 1.
+ """
+
+ schema = get_schema("BatchNormalization", 1, "")
+ op: Callable[
+ ...,
+ Tuple[
+ Union[DOUBLE, FLOAT, FLOAT16],
+ Union[DOUBLE, FLOAT, FLOAT16],
+ Union[DOUBLE, FLOAT, FLOAT16],
+ Union[DOUBLE, FLOAT, FLOAT16],
+ Union[DOUBLE, FLOAT, FLOAT16],
+ ],
+ ] = Op(self, "BatchNormalization", schema)
+ return op(
+ *self._prepare_inputs(schema, X, scale, B, mean, var),
+ consumed_inputs=consumed_inputs,
+ epsilon=epsilon,
+ is_test=is_test,
+ momentum=momentum,
+ spatial=spatial,
+ )
+
+ def Cast(
+ self,
+ input: Union[
+ BOOL,
+ DOUBLE,
+ FLOAT,
+ FLOAT16,
+ INT16,
+ INT32,
+ INT64,
+ INT8,
+ UINT16,
+ UINT32,
+ UINT64,
+ UINT8,
+ ],
+ to: Optional[str] = None,
+ ) -> Union[
+ BOOL, DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8
+ ]:
+ r"""[🌐 Cast(1)](https://onnx.ai/onnx/operators/onnx__Cast.html#cast-1 "Online Documentation")
+
+
+ The operator casts the elements of a given input tensor to a data type
+ specified by the 'to' argument and returns an output tensor of the same size in
+ the converted type. The 'to' argument must be one of the data types specified
+ in the 'DataType' enum field in the TensorProto message.
+ NOTE: Casting to and from strings is not supported yet.
+
+
+ Args:
+ input: Input tensor to be cast.
+
+ to: The data type to which the elements of the input tensor are cast.
+ Strictly must be one of the types from DataType enum in TensorProto
+ """
+
+ schema = get_schema("Cast", 1, "")
+ op: Callable[
+ ...,
+ Union[
+ BOOL,
+ DOUBLE,
+ FLOAT,
+ FLOAT16,
+ INT16,
+ INT32,
+ INT64,
+ INT8,
+ UINT16,
+ UINT32,
+ UINT64,
+ UINT8,
+ ],
+ ] = Op(self, "Cast", schema)
+ return op(*self._prepare_inputs(schema, input), to=to)
+
+ def Ceil(
+ self, X: Union[DOUBLE, FLOAT, FLOAT16], consumed_inputs: Optional[Sequence[int]] = None
+ ) -> Union[DOUBLE, FLOAT, FLOAT16]:
+ r"""[🌐 Ceil(1)](https://onnx.ai/onnx/operators/onnx__Ceil.html#ceil-1 "Online Documentation")
+
+
+ Ceil takes one input data (Tensor) and produces one output data
+ (Tensor) where the ceil is, y = ceil(x), is applied to
+ the tensor elementwise.
+
+
+ Args:
+ X: Input tensor
+
+ consumed_inputs: legacy optimization attribute.
+ """
+
+ schema = get_schema("Ceil", 1, "")
+ op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Ceil", schema)
+ return op(*self._prepare_inputs(schema, X), consumed_inputs=consumed_inputs)
+
+ def Clip(
+ self,
+ input: Union[DOUBLE, FLOAT, FLOAT16],
+ consumed_inputs: Optional[Sequence[int]] = None,
+ max: Optional[float] = None,
+ min: Optional[float] = None,
+ ) -> Union[DOUBLE, FLOAT, FLOAT16]:
+ r"""[🌐 Clip(1)](https://onnx.ai/onnx/operators/onnx__Clip.html#clip-1 "Online Documentation")
+
+
+ Clip operator limits the given input within an interval. The interval is
+ specified with arguments 'min' and 'max'. They default to
+ numeric_limits::lowest() and numeric_limits::max() respectively.
+
+
+ Args:
+ input: Input tensor whose elements are to be clipped
+
+ consumed_inputs: legacy optimization attribute.
+ + max: Maximum value, above which element is replaced by max + + min: Minimum value, under which element is replaced by min + """ + + schema = get_schema("Clip", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Clip", schema) + return op( + *self._prepare_inputs(schema, input), + consumed_inputs=consumed_inputs, + max=max, + min=min, + ) + + def Concat( + self, *inputs: Union[DOUBLE, FLOAT, FLOAT16], axis: Optional[int] = None + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Concat(1)](https://onnx.ai/onnx/operators/onnx__Concat.html#concat-1 "Online Documentation") + + Concatenate a list of tensors into a single tensor + + Args: + inputs: (variadic) List of tensors for concatenation + + axis: Which axis to concat on. Default value is 1. + """ + + schema = get_schema("Concat", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Concat", schema) + return op(*self._prepare_inputs(schema, *inputs), axis=axis) + + def Constant(self, value: Optional[TensorProto] = None) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Constant(1)](https://onnx.ai/onnx/operators/onnx__Constant.html#constant-1 "Online Documentation") + + A constant tensor. + + Args: + value: The value for the elements of the output tensor. + """ + + schema = get_schema("Constant", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Constant", schema) + return op(value=value) + + def Conv( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + W: Union[DOUBLE, FLOAT, FLOAT16], + B: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + auto_pad: str = "NOTSET", + dilations: Optional[Sequence[int]] = None, + group: int = 1, + kernel_shape: Optional[Sequence[int]] = None, + pads: Optional[Sequence[int]] = None, + strides: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Conv(1)](https://onnx.ai/onnx/operators/onnx__Conv.html#conv-1 "Online Documentation") + + + The convolution operator consumes an input tensor and a filter, and + computes the output. + + Args: + X: Input data tensor from previous layer; has size (N x C x H x W), where N + is the batch size, C is the number of channels, and H and W are the + height and width. Note that this is for the 2D image. Otherwise the size + is (N x C x D1 x D2 ... x Dn). Optionally, if dimension denotation is in + effect, the operation expects input data tensor to arrive with the + dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, + DATA_FEATURE ...]. + + W: The weight tensor that will be used in the convolutions; has size (M x + C/group x kH x kW), where C is the number of channels, and kH and kW are + the height and width of the kernel, and M is the number of feature maps. + For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x + k2 x ... x kn), where (k1 x k2 x ... kn) is the dimension of the kernel. + Optionally, if dimension denotation is in effect, the operation expects + the weight tensor to arrive with the dimension denotation of + [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL + ...]. X.shape[1] == (W.shape[1] * group) == C (assuming zero based + indices for the shape array). Or in other words FILTER_IN_CHANNEL should + be equal to DATA_CHANNEL. + + B: (optional) Optional 1D bias to be added to the convolution, has size of + M. + + auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. + Where default value is NOTSET, which means explicit padding is used. 
+ SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial + size match the input.In case of odd number add the extra padding at the + end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no + padding. + + dilations: dilation value along each spatial axis of the filter. + + group: number of groups input channels and output channels are divided into. + + kernel_shape: The shape of the convolution kernel. If not present, should be + inferred from input W. + + pads: Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. `pads` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis `i` and xi_end, the number of pixels + added at the end of axis `i`. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. + + strides: Stride along each spatial axis. + """ + + schema = get_schema("Conv", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Conv", schema) + return op( + *self._prepare_inputs(schema, X, W, B), + auto_pad=auto_pad, + dilations=dilations, + group=group, + kernel_shape=kernel_shape, + pads=pads, + strides=strides, + ) + + def ConvTranspose( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + W: Union[DOUBLE, FLOAT, FLOAT16], + B: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + auto_pad: str = "NOTSET", + dilations: Optional[Sequence[int]] = None, + group: int = 1, + kernel_shape: Optional[Sequence[int]] = None, + output_padding: Optional[Sequence[int]] = None, + output_shape: Optional[Sequence[int]] = None, + pads: Optional[Sequence[int]] = None, + strides: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 ConvTranspose(1)](https://onnx.ai/onnx/operators/onnx__ConvTranspose.html#convtranspose-1 "Online Documentation") + + + The convolution transpose operator consumes an input tensor and a filter, + and computes the output. + + If the pads parameter is provided the shape of the output is calculated via the following equation: + + output_shape[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - pads[start_i] - pads[end_i] + + output_shape can also be explicitly specified in which case pads values are auto generated using these equations: + + total_padding[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - output_shape[i] + If (auto_pads != SAME_UPPER): pads[start_i] = total_padding[i]/2; pads[end_i] = total_padding[i] - (total_padding[i]/2) + Else: pads[start_i] = total_padding[i] - (total_padding[i]/2); pads[end_i] = (total_padding[i]/2). + + + + Args: + X: Input data tensor from previous layer; has size (N x C x H x W), where N + is the batch size, C is the number of channels, and H and W are the + height and width. Note that this is for the 2D image. Otherwise the size + is (N x C x D1 x D2 ... x Dn) + + W: The weight tensor that will be used in the convolutions; has size (C x + M/group x kH x kW), where C is the number of channels, and kH and kW are + the height and width of the kernel, and M is the number of feature maps. + For more than 2 dimensions, the weight shape will be (C x M/group x k1 x + k2 x ... x kn), where (k1 x k2 x ... 
x kn) is the dimension of the + kernel. The number of channels in the output should be equal to + W.shape[1] * group (assuming zero based indices of the shape array) + + B: (optional) Optional 1D bias to be added to the convolution, has size of + M. + + auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. + Where default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial + size match the input.In case of odd number add the extra padding at the + end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no + padding. + + dilations: dilation value along each spatial axis of the filter. + + group: number of groups input channels and output channels are divided into. + + kernel_shape: The shape of the convolution kernel. If not present, should be + inferred from input W. + + output_padding: The zero-padding added to one side of the output. This is + also called adjs/adjustment in some frameworks. + + output_shape: The shape of the output can be explicitly set which will cause + pads values to be auto generated. If output_shape is specified pads + values are ignored. See doc for details for equations to generate pads + + pads: Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. `pads` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis `i` and xi_end, the number of pixels + added at the end of axis `i`. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. + + strides: Stride along each spatial axis. + """ + + schema = get_schema("ConvTranspose", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "ConvTranspose", schema) + return op( + *self._prepare_inputs(schema, X, W, B), + auto_pad=auto_pad, + dilations=dilations, + group=group, + kernel_shape=kernel_shape, + output_padding=output_padding, + output_shape=output_shape, + pads=pads, + strides=strides, + ) + + def DepthToSpace( + self, + input: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + blocksize: Optional[int] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 DepthToSpace(1)](https://onnx.ai/onnx/operators/onnx__DepthToSpace.html#depthtospace-1 "Online Documentation") + + DepthToSpace rearranges (permutes) data from depth into blocks of spatial data. + This is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of + the input tensor where values from the depth dimension are moved in spatial blocks to the height + and width dimensions. + + + Args: + input: Input tensor of [N,C,H,W], where N is the batch axis, C is the + channel or depth, H is the height and W is the width. + + blocksize: Blocks of [blocksize, blocksize] are moved. 
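+
+ Example (editor's illustrative sketch, not emitted by opgen; it assumes
+ eager evaluation of opset methods on numpy arrays is available):
+
+ ::
+
+ import numpy as np
+ from onnxscript.onnx_opset import opset1
+
+ x = np.arange(48, dtype=np.float32).reshape(1, 8, 2, 3) # [N, C, H, W]
+ y = opset1.DepthToSpace(x, blocksize=2)
+ # y has shape [1, 2, 4, 6]: C is divided by blocksize**2 while H and W
+ # are each multiplied by blocksize.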
+ """ + + schema = get_schema("DepthToSpace", 1, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "DepthToSpace", schema) + return op(*self._prepare_inputs(schema, input), blocksize=blocksize) + + def Div( + self, + A: Union[DOUBLE, FLOAT, FLOAT16], + B: Union[DOUBLE, FLOAT, FLOAT16], + axis: Optional[int] = None, + broadcast: int = 0, + consumed_inputs: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Div(1)](https://onnx.ai/onnx/operators/onnx__Div.html#div-1 "Online Documentation") + + + Performs element-wise binary division (with limited broadcast support). + + If necessary the right-hand-side argument will be broadcasted to match the + shape of left-hand-side argument. When broadcasting is specified, the second + tensor can either be of element size 1 (including a scalar tensor and any + tensor with rank equal to or smaller than the first tensor), or having its + shape as a contiguous subset of the first tensor's shape. The starting of the + mutually equal shape is specified by the argument "axis", and if it is not set, + suffix matching is assumed. 1-dim expansion doesn't work yet. + + For example, the following tensor shapes are supported (with broadcast=1): + + shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor + shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor + shape(A) = (2, 3, 4, 5), shape(B) = (5,) + shape(A) = (2, 3, 4, 5), shape(B) = (4, 5) + shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1 + shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0 + + Attribute `broadcast=1` needs to be passed to enable broadcasting. + + + Args: + A: First operand, should share the type with the second operand. + + B: Second operand. With broadcasting can be of smaller size than A. If + broadcasting is disabled it should be of the same size. + + axis: If set, defines the broadcast dimensions. See doc for details. + + broadcast: Pass 1 to enable broadcasting + + consumed_inputs: legacy optimization attribute. + """ + + schema = get_schema("Div", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Div", schema) + return op( + *self._prepare_inputs(schema, A, B), + axis=axis, + broadcast=broadcast, + consumed_inputs=consumed_inputs, + ) + + def Dropout( + self, + data: Union[DOUBLE, FLOAT, FLOAT16], + consumed_inputs: Optional[Sequence[int]] = None, + is_test: int = 0, + ratio: float = 0.5, + ) -> Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]]: + r"""[🌐 Dropout(1)](https://onnx.ai/onnx/operators/onnx__Dropout.html#dropout-1 "Online Documentation") + + + Dropout takes one input data (Tensor) and produces two Tensor outputs, + output (Tensor) and mask (Tensor). Depending on whether it is in + test mode or not, the output Y will either be a random dropout, or a simple + copy of the input. Note that our implementation of Dropout does scaling in + the training phase, so during testing nothing needs to be done. + + + Args: + data: The input data as Tensor. + + consumed_inputs: legacy optimization attribute. + + is_test: (int, default 0) if nonzero, run dropout in test mode where the + output is simply Y = X. 
+ + ratio: (float, default 0.5) the ratio of random dropout + """ + + schema = get_schema("Dropout", 1, "") + op: Callable[ + ..., Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]] + ] = Op(self, "Dropout", schema) + return op( + *self._prepare_inputs(schema, data), + consumed_inputs=consumed_inputs, + is_test=is_test, + ratio=ratio, + ) + + def Elu( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + alpha: float = 1.0, + consumed_inputs: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Elu(1)](https://onnx.ai/onnx/operators/onnx__Elu.html#elu-1 "Online Documentation") + + + Elu takes one input data (Tensor) and produces one output data + (Tensor) where the function `f(x) = alpha * (exp(x) - 1.) for x < + 0`, `f(x) = x for x >= 0`., is applied to the tensor elementwise. + + + + Args: + X: 1D input tensor + + alpha: Coefficient of ELU default to 1.0. + + consumed_inputs: legacy optimization attribute. + """ + + schema = get_schema("Elu", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Elu", schema) + return op( + *self._prepare_inputs(schema, X), alpha=alpha, consumed_inputs=consumed_inputs + ) + + def Equal( + self, + A: Union[BOOL, INT32, INT64], + B: Union[BOOL, INT32, INT64], + axis: Optional[int] = None, + broadcast: int = 0, + ) -> BOOL: + r"""[🌐 Equal(1)](https://onnx.ai/onnx/operators/onnx__Equal.html#equal-1 "Online Documentation") + + + Returns the tensor resulted from performing the `equal` logical operation + elementwise on the input tensors `A` and `B`. + + If broadcasting is enabled, the right-hand-side argument will be broadcasted + to match the shape of left-hand-side argument. See the doc of `Add` for a + detailed description of the broadcasting rules. + + + Args: + A: Left input tensor for the logical operator. + + B: Right input tensor for the logical operator. + + axis: If set, defines the broadcast dimensions. + + broadcast: Enable broadcasting + """ + + schema = get_schema("Equal", 1, "") + op: Callable[..., BOOL] = Op(self, "Equal", schema) + return op(*self._prepare_inputs(schema, A, B), axis=axis, broadcast=broadcast) + + def Exp( + self, + input: Union[DOUBLE, FLOAT, FLOAT16], + consumed_inputs: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Exp(1)](https://onnx.ai/onnx/operators/onnx__Exp.html#exp-1 "Online Documentation") + + + Calculates the exponential of the given input tensor, element-wise. + + + Args: + input: Input tensor + + consumed_inputs: legacy optimization attribute. + """ + + schema = get_schema("Exp", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Exp", schema) + return op(*self._prepare_inputs(schema, input), consumed_inputs=consumed_inputs) + + def Flatten( + self, input: Union[DOUBLE, FLOAT, FLOAT16], axis: int = 1 + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Flatten(1)](https://onnx.ai/onnx/operators/onnx__Flatten.html#flatten-1 "Online Documentation") + + + Flattens the input tensor into a 2D matrix. If input tensor has shape + (d_0, d_1, ... d_n) then the output will have shape + (d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn). + + + Args: + input: A tensor of rank >= axis. + + axis: Indicate up to which input dimensions (exclusive) should be flattened + to the outer dimension of the output. The value for axis must be in the + range [0, R], where R is the rank of the input tensor. When axis = 0, + the shape of the output tensor is (1, (d_0 X d_1 ... d_n), where the + shape of the input tensor is (d_0, d_1, ... 
d_n).
+ """
+
+ schema = get_schema("Flatten", 1, "")
+ op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Flatten", schema)
+ return op(*self._prepare_inputs(schema, input), axis=axis)
+
+ def Floor(
+ self, X: Union[DOUBLE, FLOAT, FLOAT16], consumed_inputs: Optional[Sequence[int]] = None
+ ) -> Union[DOUBLE, FLOAT, FLOAT16]:
+ r"""[🌐 Floor(1)](https://onnx.ai/onnx/operators/onnx__Floor.html#floor-1 "Online Documentation")
+
+
+ Floor takes one input data (Tensor) and produces one output data
+ (Tensor) where the floor is, y = floor(x), is applied to
+ the tensor elementwise.
+
+
+ Args:
+ X: Input tensor
+
+ consumed_inputs: legacy optimization attribute.
+ """
+
+ schema = get_schema("Floor", 1, "")
+ op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Floor", schema)
+ return op(*self._prepare_inputs(schema, X), consumed_inputs=consumed_inputs)
+
+ def GRU(
+ self,
+ X: Union[DOUBLE, FLOAT, FLOAT16],
+ W: Union[DOUBLE, FLOAT, FLOAT16],
+ R: Union[DOUBLE, FLOAT, FLOAT16],
+ B: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None,
+ sequence_lens: Optional[INT32] = None,
+ initial_h: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None,
+ activation_alpha: Optional[Sequence[float]] = None,
+ activation_beta: Optional[Sequence[float]] = None,
+ activations: Optional[Sequence[str]] = None,
+ clip: Optional[float] = None,
+ direction: str = "forward",
+ hidden_size: Optional[int] = None,
+ output_sequence: int = 0,
+ ) -> Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]]:
+ r"""[🌐 GRU(1)](https://onnx.ai/onnx/operators/onnx__GRU.html#gru-1 "Online Documentation")
+
+
+ Computes a one-layer GRU. This operator is usually supported via some custom
+ implementation such as CuDNN.
+
+ Notations:
+
+ `X` - input tensor
+
+ `z` - update gate
+
+ `r` - reset gate
+
+ `h` - hidden gate
+
+ `t` - time step (t-1 means previous time step)
+
+ `W[zrh]` - W parameter weight matrix for update, reset, and hidden gates
+
+ `R[zrh]` - R recurrence weight matrix for update, reset, and hidden gates
+
+ `Wb[zrh]` - W bias vectors for update, reset, and hidden gates
+
+ `Rb[zrh]` - R bias vectors for update, reset, and hidden gates
+
+ `WB[zrh]` - W parameter weight matrix for backward update, reset, and hidden gates
+
+ `RB[zrh]` - R recurrence weight matrix for backward update, reset, and hidden gates
+
+ `WBb[zrh]` - W bias vectors for backward update, reset, and hidden gates
+
+ `RBb[zrh]` - R bias vectors for backward update, reset, and hidden gates
+
+ `H` - Hidden state
+
+ `num_directions` - 2 if direction == bidirectional else 1
+
+ Activation functions:
+
+ Relu(x) - max(0, x)
+
+ Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})
+
+ Sigmoid(x) - 1/(1 + e^{-x})
+
+ (NOTE: Below are optional)
+
+ Affine(x) - alpha*x + beta
+
+ LeakyRelu(x) - x if x >= 0 else alpha * x
+
+ ThresholdedRelu(x) - x if x >= alpha else 0
+
+ ScaledTanh(x) - alpha*Tanh(beta*x)
+
+ HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)
+
+ Elu(x) - x if x >= 0 else alpha*(e^x - 1)
+
+ Softsign(x) - x/(1 + |x|)
+
+ Softplus(x) - log(1 + e^x)
+
+ Equations (Default: f=Sigmoid, g=Tanh):
+
+ - zt = f(Xt*(Wz^T) + Ht-1*Rz + Wbz + Rbz)
+
+ - rt = f(Xt*(Wr^T) + Ht-1*Rr + Wbr + Rbr)
+
+ - ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*Rh + Rbh + Wbh) # default, when linear_before_reset = 0
+
+ - ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*Rh + Rbh) + Wbh) # when linear_before_reset != 0
+
+ - Ht = (1 - zt) (.) ht + zt (.)
Ht-1 + + + Args: + X: The input sequences packed (and potentially padded) into one 3-D tensor + with the shape of `[seq_length, batch_size, input_size]`. + + W: The weight tensor for the gates. Concatenation of `W[zrh]` and `WB[zrh]` + (if bidirectional) along dimension 0. This tensor has shape + `[num_directions, 3*hidden_size, input_size]`. + + R: The recurrence weight tensor. Concatenation of `R[zrh]` and `RB[zrh]` (if + bidirectional) along dimension 0. This tensor has shape + `[num_directions, 3*hidden_size, hidden_size]`. + + B: (optional) The bias tensor for the gates. Concatenation of `[Wb[zrh], + Rb[zrh]]` and `[WBb[zrh], RBb[zrh]]` (if bidirectional) along dimension + 0. This tensor has shape `[num_directions, 6*hidden_size]`. Optional: If + not specified - assumed to be 0 + + sequence_lens: (optional) Optional tensor specifying lengths of the + sequences in a batch. If not specified - assumed all sequences in the + batch to have length `seq_length`. It has shape `[batch_size]`. + + initial_h: (optional) Optional initial value of the hidden. If not specified + - assumed to be 0. It has shape `[num_directions, batch_size, + hidden_size]`. + + activation_alpha: Optional scaling values used by some activation functions. + The values are consumed in the order of activation functions, for + example (f, g, h) in LSTM. + + activation_beta: Optional scaling values used by some activation functions. + The values are consumed in the order of activation functions, for + example (f, g, h) in LSTM. + + activations: A list of 2 (or 4 if bidirectional) activation functions for + update, reset, and hidden gates. The activation functions must be one of + the activation functions specified above. Optional: See the equations + for default if not specified. + + clip: Cell clip threshold. Clipping bounds the elements of a tensor in the + range of [-threshold, +threshold] and is applied to the input of + activations. No clip if not specified. + + direction: Specify if the RNN is forward, reverse, or bidirectional. Must be + one of forward (default), reverse, or bidirectional. + + hidden_size: Number of neurons in the hidden layer + + output_sequence: The sequence output for the hidden is optional if 0. + Default 0. + """ + + schema = get_schema("GRU", 1, "") + op: Callable[ + ..., Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]] + ] = Op(self, "GRU", schema) + return op( + *self._prepare_inputs(schema, X, W, R, B, sequence_lens, initial_h), + activation_alpha=activation_alpha, + activation_beta=activation_beta, + activations=activations, + clip=clip, + direction=direction, + hidden_size=hidden_size, + output_sequence=output_sequence, + ) + + def Gather( + self, + data: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + indices: Union[INT32, INT64], + axis: int = 0, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Gather(1)](https://onnx.ai/onnx/operators/onnx__Gather.html#gather-1 "Online Documentation") + + + Given `data` tensor of rank r >= 1, and `indices` tensor of rank q, gather + entries of the axis dimension of `data` (by default outer-most one as axis=0) indexed by `indices`, and concatenates + them in an output tensor of rank q + (r - 1). 
+ Example 1: + :: + + data = [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] + indices = [ + [0, 1], + [1, 2], + ] + output = [ + [ + [1.0, 1.2], + [2.3, 3.4], + ], + [ + [2.3, 3.4], + [4.5, 5.7], + ], + ] + + + Example 2: + :: + + data = [ + [1.0, 1.2, 1.9], + [2.3, 3.4, 3.9], + [4.5, 5.7, 5.9], + ] + indices = [ + [0, 2], + ] + axis = 1, + output = [ + [ + [1.0, 1.9], + [2.3, 3.9], + [4.5, 5.9], + ], + ] + + + + + Args: + data: Tensor of rank r >= 1. + + indices: Tensor of int32/int64 indices, of any rank q. All index values are + expected to be within bounds. It is an error if any of the index values + are out of bounds. + + axis: Which axis to gather on. Negative value means counting dimensions from + the back. Accepted range is [-r, r-1] + """ + + schema = get_schema("Gather", 1, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Gather", schema) + return op(*self._prepare_inputs(schema, data, indices), axis=axis) + + def Gemm( + self, + A: Union[DOUBLE, FLOAT, FLOAT16], + B: Union[DOUBLE, FLOAT, FLOAT16], + C: Union[DOUBLE, FLOAT, FLOAT16], + alpha: float = 1.0, + beta: float = 1.0, + broadcast: int = 0, + transA: int = 0, + transB: int = 0, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Gemm(1)](https://onnx.ai/onnx/operators/onnx__Gemm.html#gemm-1 "Online Documentation") + + General Matrix multiplication: + https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3 + Compute Y = alpha * A * B + beta * C, where input tensor A has + dimension (M X K), input tensor B has dimension (K X N), input tensor C and + output tensor Y have dimension (M X N). + If attribute broadcast is non-zero, input tensor C will be broadcasted to match + the dimension requirement. A will be transposed before doing the computation + if attribute transA is non-zero, same for B and transB. + + + Args: + A: Input tensor A + + B: Input tensor B + + C: Input tensor C, can be inplace. + + alpha: Scalar multiplier for the product of input tensors A * B, the default + value is 1.0. + + beta: Scalar multiplier for input tensor C, the default value is 1.0. + + broadcast: Whether C should be broadcasted + + transA: Whether A should be transposed + + transB: Whether B should be transposed + """ + + schema = get_schema("Gemm", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Gemm", schema) + return op( + *self._prepare_inputs(schema, A, B, C), + alpha=alpha, + beta=beta, + broadcast=broadcast, + transA=transA, + transB=transB, + ) + + def GlobalAveragePool( + self, X: Union[DOUBLE, FLOAT, FLOAT16] + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 GlobalAveragePool(1)](https://onnx.ai/onnx/operators/onnx__GlobalAveragePool.html#globalaveragepool-1 "Online Documentation") + + + GlobalAveragePool consumes an input tensor X and applies average pooling across + the values in the same channel. This is equivalent to AveragePool with kernel size + equal to the spatial dimension of input tensor. + + Args: + X: (differentiable) Input data tensor from the previous operator; dimensions + for image case are (N x C x H x W), where N is the batch size, C is the + number of channels, and H and W are the height and the width of the + data. For non image case, the dimensions are in the form of (N x C x D1 + x D2 ... Dn), where N is the batch size. 
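+
+ Example (editor's illustrative sketch, not emitted by opgen; it assumes
+ eager evaluation of opset methods on numpy arrays is available):
+
+ ::
+
+ import numpy as np
+ from onnxscript.onnx_opset import opset1
+
+ x = np.random.rand(2, 3, 4, 5).astype(np.float32) # (N, C, H, W)
+ y = opset1.GlobalAveragePool(x)
+ # y has shape (2, 3, 1, 1); each value equals the mean over the
+ # corresponding 4x5 spatial plane, i.e. np.mean(x, axis=(2, 3)).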
+ """
+
+ schema = get_schema("GlobalAveragePool", 1, "")
+ op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(
+ self, "GlobalAveragePool", schema
+ )
+ return op(*self._prepare_inputs(schema, X))
+
+ def GlobalLpPool(
+ self, X: Union[DOUBLE, FLOAT, FLOAT16], p: float = 2.0
+ ) -> Union[DOUBLE, FLOAT, FLOAT16]:
+ r"""[🌐 GlobalLpPool(1)](https://onnx.ai/onnx/operators/onnx__GlobalLpPool.html#globallppool-1 "Online Documentation")
+
+
+ GlobalLpPool consumes an input tensor X and applies lp pool pooling across
+ the values in the same channel. This is equivalent to LpPool with kernel size
+ equal to the spatial dimension of input tensor.
+
+ Args:
+ X: Input data tensor from the previous operator; dimensions for image case
+ are (N x C x H x W), where N is the batch size, C is the number of
+ channels, and H and W are the height and the width of the data. For non
+ image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn),
+ where N is the batch size.
+
+ p: p value of the Lp norm used to pool over the input data, default is 2.0.
+ """
+
+ schema = get_schema("GlobalLpPool", 1, "")
+ op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "GlobalLpPool", schema)
+ return op(*self._prepare_inputs(schema, X), p=p)
+
+ def GlobalMaxPool(self, X: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]:
+ r"""[🌐 GlobalMaxPool(1)](https://onnx.ai/onnx/operators/onnx__GlobalMaxPool.html#globalmaxpool-1 "Online Documentation")
+
+
+ GlobalMaxPool consumes an input tensor X and applies max pooling across
+ the values in the same channel. This is equivalent to MaxPool with kernel size
+ equal to the spatial dimension of input tensor.
+
+ Args:
+ X: (differentiable) Input data tensor from the previous operator; dimensions
+ for image case are (N x C x H x W), where N is the batch size, C is the
+ number of channels, and H and W are the height and the width of the
+ data. For non image case, the dimensions are in the form of (N x C x D1
+ x D2 ... Dn), where N is the batch size.
+ """
+
+ schema = get_schema("GlobalMaxPool", 1, "")
+ op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "GlobalMaxPool", schema)
+ return op(*self._prepare_inputs(schema, X))
+
+ def Greater(
+ self,
+ A: Union[DOUBLE, FLOAT, FLOAT16],
+ B: Union[DOUBLE, FLOAT, FLOAT16],
+ axis: Optional[int] = None,
+ broadcast: int = 0,
+ ) -> BOOL:
+ r"""[🌐 Greater(1)](https://onnx.ai/onnx/operators/onnx__Greater.html#greater-1 "Online Documentation")
+
+
+ Returns the tensor resulted from performing the `greater` logical operation
+ elementwise on the input tensors `A` and `B`.
+
+ If broadcasting is enabled, the right-hand-side argument will be broadcasted
+ to match the shape of left-hand-side argument. See the doc of `Add` for a
+ detailed description of the broadcasting rules.
+
+
+ Args:
+ A: Left input tensor for the logical operator.
+
+ B: Right input tensor for the logical operator.
+
+ axis: If set, defines the broadcast dimensions.
+ + broadcast: Enable broadcasting + """ + + schema = get_schema("Greater", 1, "") + op: Callable[..., BOOL] = Op(self, "Greater", schema) + return op(*self._prepare_inputs(schema, A, B), axis=axis, broadcast=broadcast) + + def HardSigmoid( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + alpha: float = 0.20000000298023224, + beta: float = 0.5, + consumed_inputs: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 HardSigmoid(1)](https://onnx.ai/onnx/operators/onnx__HardSigmoid.html#hardsigmoid-1 "Online Documentation") + + + HardSigmoid takes one input data (Tensor) and produces one output data + (Tensor) where the HardSigmoid function, y = max(0, min(1, alpha * x + beta)), + is applied to the tensor elementwise. + + + Args: + X: Input tensor + + alpha: Value of alpha default to 0.2 + + beta: Value of beta default to 0.5 + + consumed_inputs: legacy optimization attribute. + """ + + schema = get_schema("HardSigmoid", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "HardSigmoid", schema) + return op( + *self._prepare_inputs(schema, X), + alpha=alpha, + beta=beta, + consumed_inputs=consumed_inputs, + ) + + def Hardmax( + self, input: Union[DOUBLE, FLOAT, FLOAT16], axis: int = 1 + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Hardmax(1)](https://onnx.ai/onnx/operators/onnx__Hardmax.html#hardmax-1 "Online Documentation") + + + The operator computes the hardmax (1 for the first maximum value, and 0 for all others) values for each layer in the batch + of the given input. The input is a 2-D tensor (Tensor) of size + (batch_size x input_feature_dimensions). The output tensor has the same shape + and contains the hardmax values of the corresponding input. + + Input does not need to explicitly be a 2D vector; rather, it will be + coerced into one. For an arbitrary n-dimensional tensor + input \in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}] and k is + the axis provided, then input will be coerced into a 2-dimensional tensor with + dimensions [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]. For the default + case where axis=1, this means the input tensor will be coerced into a 2D tensor + of dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size. + In this situation, we must have a_0 = N and a_1 * ... * a_{n-1} = D. + Each of these dimensions must be matched correctly, or else the operator + will throw errors. + + + Args: + input: The input tensor that's coerced into a 2D matrix of size (NxD) as + described above. 
+ + axis: Describes the axis of the inputs when coerced to 2D; defaults to one + because the 0th axis most likely describes the batch_size + """ + + schema = get_schema("Hardmax", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Hardmax", schema) + return op(*self._prepare_inputs(schema, input), axis=axis) + + def Identity( + self, + input: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Identity(1)](https://onnx.ai/onnx/operators/onnx__Identity.html#identity-1 "Online Documentation") + + Identity operator + + Args: + input: Input tensor + """ + + schema = get_schema("Identity", 1, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Identity", schema) + return op(*self._prepare_inputs(schema, input)) + + def If( + self, + cond: BOOL, + else_branch: Optional[GraphProto] = None, + then_branch: Optional[GraphProto] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 If(1)](https://onnx.ai/onnx/operators/onnx__If.html#if-1 "Online Documentation") + + If conditional + + Args: + cond: Condition for the if + + else_branch: Graph to run if condition is false. Has N outputs: values you + wish to be live-out to the enclosing scope. The number of outputs must + match the number of outputs in the then_branch. + + then_branch: Graph to run if condition is true. Has N outputs: values you + wish to be live-out to the enclosing scope. The number of outputs must + match the number of outputs in the else_branch. + """ + + schema = get_schema("If", 1, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "If", schema) + return op( + *self._prepare_inputs(schema, cond), + else_branch=else_branch, + then_branch=then_branch, + ) + + def InstanceNormalization( + self, + input: Union[DOUBLE, FLOAT, FLOAT16], + scale: Union[DOUBLE, FLOAT, FLOAT16], + B: Union[DOUBLE, FLOAT, FLOAT16], + consumed_inputs: Optional[Sequence[int]] = None, + epsilon: float = 9.999999747378752e-06, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 InstanceNormalization(1)](https://onnx.ai/onnx/operators/onnx__InstanceNormalization.html#instancenormalization-1 "Online Documentation") + + + Carries out instance normalization as described in the paper + https://arxiv.org/abs/1607.08022. + + y = scale * (x - mean) / sqrt(variance + epsilon) + B, + where mean and variance are computed per instance per channel. + + + + Args: + input: The input 4-dimensional tensor of shape NCHW. + + scale: The input 1-dimensional scale tensor of size C. + + B: The input 1-dimensional bias tensor of size C. + + consumed_inputs: legacy optimization attribute. + + epsilon: The epsilon value to use to avoid division by zero, default is + 1e-5f. 
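+
+ Reference semantics (editor's numpy sketch of the formula above, not
+ emitted by opgen):
+
+ ::
+
+ import numpy as np
+
+ x = np.random.rand(2, 3, 4, 4).astype(np.float32) # (N, C, H, W)
+ scale = np.ones(3, dtype=np.float32)
+ b = np.zeros(3, dtype=np.float32)
+ mean = x.mean(axis=(2, 3), keepdims=True) # per instance, per channel
+ var = x.var(axis=(2, 3), keepdims=True)
+ y = scale[None, :, None, None] * (x - mean) / np.sqrt(var + 1e-5)
+ y = y + b[None, :, None, None]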
+ """ + + schema = get_schema("InstanceNormalization", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op( + self, "InstanceNormalization", schema + ) + return op( + *self._prepare_inputs(schema, input, scale, B), + consumed_inputs=consumed_inputs, + epsilon=epsilon, + ) + + def LRN( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + alpha: float = 9.999999747378752e-05, + beta: float = 0.75, + bias: float = 1.0, + size: Optional[int] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 LRN(1)](https://onnx.ai/onnx/operators/onnx__LRN.html#lrn-1 "Online Documentation") + + + Local Response Normalization proposed in the [AlexNet paper](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf). + It normalizes over local input regions. + The local region is defined across the channels. For an element X[n, c, d1, ..., dk] in a tensor + of shape (N x C x D1 x D2, ..., Dk), its region is + {X[n, i, d1, ..., dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))}. + + square_sum[n, c, d1, ..., dk] = sum(X[n, i, d1, ..., dk] ^ 2), + where max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2)). + + Y[n, c, d1, ..., dk] = X[n, c, d1, ..., dk] / (bias + alpha / size * square_sum[n, c, d1, ..., dk] ) ^ beta + + + Args: + X: Input data tensor from the previous operator; dimensions for image case + are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. For non + image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), + where N is the batch size. Optionally, if dimension denotation is in + effect, the operation expects the input data tensor to arrive with the + dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, + DATA_FEATURE ...]. + + alpha: Scaling parameter. + + beta: The exponent. + + size: The number of channels to sum over + """ + + schema = get_schema("LRN", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "LRN", schema) + return op( + *self._prepare_inputs(schema, X), alpha=alpha, beta=beta, bias=bias, size=size + ) + + def LSTM( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + W: Union[DOUBLE, FLOAT, FLOAT16], + R: Union[DOUBLE, FLOAT, FLOAT16], + B: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + sequence_lens: Optional[INT32] = None, + initial_h: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + initial_c: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + P: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + activation_alpha: Optional[Sequence[float]] = None, + activation_beta: Optional[Sequence[float]] = None, + activations: Optional[Sequence[str]] = None, + clip: Optional[float] = None, + direction: str = "forward", + hidden_size: Optional[int] = None, + input_forget: int = 0, + output_sequence: int = 0, + ) -> Tuple[ + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + ]: + r"""[🌐 LSTM(1)](https://onnx.ai/onnx/operators/onnx__LSTM.html#lstm-1 "Online Documentation") + + + Computes an one-layer LSTM. This operator is usually supported via some + custom implementation such as CuDNN. 
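+
+        As a concrete instance of the shape conventions listed below, a
+        forward-only LSTM (num_directions = 1) with input_size D and
+        hidden_size H takes W of shape [1, 4*H, D], R of shape [1, 4*H, H],
+        and an optional initial_h of shape [1, batch_size, H].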
+ + Notations: + + `X` - input tensor + + `i` - input gate + + `o` - output gate + + `f` - forget gate + + `c` - cell gate + + `t` - time step (t-1 means previous time step) + + `W[iofc]` - W parameter weight matrix for input, output, forget, and cell gates + + `R[iofc]` - R recurrence weight matrix for input, output, forget, and cell gates + + `Wb[iofc]` - W bias vectors for input, output, forget, and cell gates + + `Rb[iofc]` - R bias vectors for input, output, forget, and cell gates + + `P[iof]` - P peephole weight vector for input, output, and forget gates + + `WB[iofc]` - W parameter weight matrix for backward input, output, forget, and cell gates + + `RB[iofc]` - R recurrence weight matrix for backward input, output, forget, and cell gates + + `WBb[iofc]` - W bias vectors for backward input, output, forget, and cell gates + + `RBb[iofc]` - R bias vectors for backward input, output, forget, and cell gates + + `PB[iof]` - P peephole weight vector for backward input, output, and forget gates + + `H` - Hidden state + + `num_directions` - 2 if direction == bidirectional else 1 + + Activation functions: + + Relu(x) - max(0, x) + + Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) + + Sigmoid(x) - 1/(1 + e^{-x}) + + (NOTE: Below are optional) + + Affine(x) - alpha*x + beta + + LeakyRelu(x) - x if x >= 0 else alpha * x + + ThresholdedRelu(x) - x if x >= alpha else 0 + + ScaledTanh(x) - alpha*Tanh(beta*x) + + HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) + + Elu(x) - x if x >= 0 else alpha*(e^x - 1) + + Softsign(x) - x/(1 + |x|) + + Softplus(x) - log(1 + e^x) + + Equations (Default: f=Sigmoid, g=Tanh, h=Tanh): + + - it = f(Xt*(Wi^T) + Ht-1*Ri + Pi (.) Ct-1 + Wbi + Rbi) + + - ft = f(Xt*(Wf^T) + Ht-1*Rf + Pf (.) Ct-1 + Wbf + Rbf) + + - ct = g(Xt*(Wc^T) + Ht-1*Rc + Wbc + Rbc) + + - Ct = ft (.) Ct-1 + it (.) ct + + - ot = f(Xt*(Wo^T) + Ht-1*Ro + Po (.) Ct + Wbo + Rbo) + + - Ht = ot (.) h(Ct) + + + Args: + X: The input sequences packed (and potentially padded) into one 3-D tensor + with the shape of `[seq_length, batch_size, input_size]`. + + W: The weight tensor for the gates. Concatenation of `W[iofc]` and + `WB[iofc]` (if bidirectional) along dimension 0. The tensor has shape + `[num_directions, 4*hidden_size, input_size]`. + + R: The recurrence weight tensor. Concatenation of `R[iofc]` and `RB[iofc]` + (if bidirectional) along dimension 0. This tensor has shape + `[num_directions, 4*hidden_size, hidden_size]`. + + B: (optional) The bias tensor for input gate. Concatenation of `[Wb[iofc], + Rb[iofc]]`, and `[WBb[iofc], RBb[iofc]]` (if bidirectional) along + dimension 0. This tensor has shape `[num_directions, 8*hidden_size]`. + Optional: If not specified - assumed to be 0. + + sequence_lens: (optional) Optional tensor specifying lengths of the + sequences in a batch. If not specified - assumed all sequences in the + batch to have length `seq_length`. It has shape `[batch_size]`. + + initial_h: (optional) Optional initial value of the hidden. If not specified + - assumed to be 0. It has shape `[num_directions, batch_size, + hidden_size]`. + + initial_c: (optional) Optional initial value of the cell. If not specified - + assumed to be 0. It has shape `[num_directions, batch_size, + hidden_size]`. + + P: (optional) The weight tensor for peepholes. Concatenation of `P[iof]` and + `PB[iof]` (if bidirectional) along dimension 0. It has shape + `[num_directions, 3*hidde_size]`. Optional: If not specified - assumed + to be 0. + + activation_alpha: Optional scaling values used by some activation functions. 
+ The values are consumed in the order of activation functions, for + example (f, g, h) in LSTM. Default values are the same as of + corresponding ONNX operators.For example with LeakyRelu, the default + alpha is 0.01. + + activation_beta: Optional scaling values used by some activation functions. + The values are consumed in the order of activation functions, for + example (f, g, h) in LSTM. Default values are the same as of + corresponding ONNX operators. + + activations: A list of 3 (or 6 if bidirectional) activation functions for + input, output, forget, cell, and hidden. The activation functions must + be one of the activation functions specified above. Optional: See the + equations for default if not specified. + + clip: Cell clip threshold. Clipping bounds the elements of a tensor in the + range of [-threshold, +threshold] and is applied to the input of + activations. No clip if not specified. + + direction: Specify if the RNN is forward, reverse, or bidirectional. Must be + one of forward (default), reverse, or bidirectional. + + hidden_size: Number of neurons in the hidden layer + + input_forget: Couple the input and forget gates if 1, default 0. + + output_sequence: The sequence output for the hidden is optional if 0. + Default 0. + """ + + schema = get_schema("LSTM", 1, "") + op: Callable[ + ..., + Tuple[ + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + ], + ] = Op(self, "LSTM", schema) + return op( + *self._prepare_inputs(schema, X, W, R, B, sequence_lens, initial_h, initial_c, P), + activation_alpha=activation_alpha, + activation_beta=activation_beta, + activations=activations, + clip=clip, + direction=direction, + hidden_size=hidden_size, + input_forget=input_forget, + output_sequence=output_sequence, + ) + + def LeakyRelu( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + alpha: float = 0.009999999776482582, + consumed_inputs: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 LeakyRelu(1)](https://onnx.ai/onnx/operators/onnx__LeakyRelu.html#leakyrelu-1 "Online Documentation") + + + LeakyRelu takes input data (Tensor) and an argument alpha, and produces one + output data (Tensor) where the function `f(x) = alpha * x for x < 0`, + `f(x) = x for x >= 0`, is applied to the data tensor elementwise. + + + Args: + X: Input tensor + + alpha: Coefficient of leakage default to 0.01. + + consumed_inputs: legacy optimization attribute. + """ + + schema = get_schema("LeakyRelu", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "LeakyRelu", schema) + return op( + *self._prepare_inputs(schema, X), alpha=alpha, consumed_inputs=consumed_inputs + ) + + def Less( + self, + A: Union[DOUBLE, FLOAT, FLOAT16], + B: Union[DOUBLE, FLOAT, FLOAT16], + axis: Optional[int] = None, + broadcast: int = 0, + ) -> BOOL: + r"""[🌐 Less(1)](https://onnx.ai/onnx/operators/onnx__Less.html#less-1 "Online Documentation") + + + Returns the tensor resulted from performing the `less` logical operation + elementwise on the input tensors `A` and `B`. + + If broadcasting is enabled, the right-hand-side argument will be broadcasted + to match the shape of left-hand-side argument. See the doc of `Add` for a + detailed description of the broadcasting rules. + + + Args: + A: Left input tensor for the logical operator. + + B: Right input tensor for the logical operator. + + axis: If set, defines the broadcast dimensions. 
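+
+                For example, with broadcast=1, shape(A) = (2, 3, 4, 5) and
+                shape(B) = (3, 4), setting axis=1 aligns B with dimensions 1
+                and 2 of A, following the same rules as `Add`.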
+ + broadcast: Enable broadcasting + """ + + schema = get_schema("Less", 1, "") + op: Callable[..., BOOL] = Op(self, "Less", schema) + return op(*self._prepare_inputs(schema, A, B), axis=axis, broadcast=broadcast) + + def Log( + self, + input: Union[DOUBLE, FLOAT, FLOAT16], + consumed_inputs: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Log(1)](https://onnx.ai/onnx/operators/onnx__Log.html#log-1 "Online Documentation") + + + Calculates the natural log of the given input tensor, element-wise. + + + Args: + input: Input tensor + + consumed_inputs: legacy optimization attribute. + """ + + schema = get_schema("Log", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Log", schema) + return op(*self._prepare_inputs(schema, input), consumed_inputs=consumed_inputs) + + def LogSoftmax( + self, input: Union[DOUBLE, FLOAT, FLOAT16], axis: int = 1 + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 LogSoftmax(1)](https://onnx.ai/onnx/operators/onnx__LogSoftmax.html#logsoftmax-1 "Online Documentation") + + + The operator computes the logsoftmax (log of softmax) values for each layer in the batch + of the given input. The input is a 2-D tensor (Tensor) of size + (batch_size x input_feature_dimensions). The output tensor has the same shape + and contains the logsoftmax values of the corresponding input. + + Input does not need to explicitly be a 2D vector; rather, it will be + coerced into one. For an arbitrary n-dimensional tensor + input \in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}] and k is + the axis provided, then input will be coerced into a 2-dimensional tensor with + dimensions [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]. For the default + case where axis=1, this means the input tensor will be coerced into a 2D tensor + of dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size. + In this situation, we must have a_0 = N and a_1 * ... * a_{n-1} = D. + Each of these dimensions must be matched correctly, or else the operator + will throw errors. + + + Args: + input: The input tensor that's coerced into a 2D matrix of size (NxD) as + described above. + + axis: Describes the axis of the inputs when coerced to 2D; defaults to one + because the 0th axis most likely describes the batch_size + """ + + schema = get_schema("LogSoftmax", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "LogSoftmax", schema) + return op(*self._prepare_inputs(schema, input), axis=axis) + + def Loop( + self, + M: Optional[INT64], + cond: Optional[BOOL], + *v_initial: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + body: Optional[GraphProto] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Loop(1)](https://onnx.ai/onnx/operators/onnx__Loop.html#loop-1 "Online Documentation") + + + Generic Looping construct. This loop has multiple termination conditions: + + 1) Trip count. Iteration count specified at runtime. Set by + specifying the input M. Optional. Set to empty string to omit. + Note that a static trip count (specified at graph construction time) can be + specified by passing in a constant node for input M. + 2) Loop termination condition. This is an input to the op that determines + whether to run the first iteration and also a loop-carried dependency for + the body graph. 
The body graph must yield a value for the condition variable, + whether this input is provided or not. + + This table summarizes the operating modes of this operator with equivalent + C-style code: + + Operator inputs defined as (max_trip_count, condition_var). + + input ("", ""): + for (int i=0; ; ++i) { + cond = ... // Note this value is ignored, but is required in the body + } + + input ("", cond) // Note this is analogous to a while loop + bool cond = ...; + for (int i=0; cond; ++i) { + cond = ...; + } + + input ("", 1) // Note this is analogous to a do-while loop + bool cond = true + for (int i=0; cond; ++i) { + cond = ...; + } + + input (trip_count, "") // Note this is analogous to a for loop + int trip_count = ... + for (int i=0; i < trip_count; ++i) { + cond = ...; // ignored + } + + input (trip_count, cond) + int trip_count = ...; + bool cond = ...; + for (int i=0; i < trip_count && cond; ++i) { + cond = ...; + } + + + *Sample usage - cond as well as trip count* + + graph predict-net { + %a = Constant[value = ]() + %b = Constant[value = ]() + %keepgoing = Constant[value = ]() + %max_trip_count = Constant[value = ]() + %keepgoing_out, %b_out, %user_defined_vals = Loop[body = ](%max_trip_count, %keepgoing, %b) + return + } + + graph body-net ( + %i[INT32, scalar] + %keepgoing[BOOL, scalar] + %b[INT32, scalar] + ) { + %my_local = Add(%a, %b) + %b_out = Sub(%a, %b) + %keepgoing_out = Greater(%my_local, %b_out) + %user_defined_vals = Add(%b, %b) + return %keepgoing_out, %b_out, %user_defined_vals + } + + *Sample equivalent C code* + + { + /* User-defined code (enclosing scope) */ + int a = 3, b = 6; + bool keepgoing = true; // Analogous to input cond + /* End user-defined code */ + + /* Implicitly-defined code */ + const int max_trip_count = 10; // Analogous to input M + int user_defined_vals[]; // Imagine this is resizable + /* End implicitly-defined code */ + for (int i=0; i < max_trip_count && keepgoing; ++i) { + /* User-defined code (loop body) */ + int my_local = a + b; // Reading values in the enclosing scope is fine + b = a - b; // writes fine if we specify b as a loop-carried dependency + keepgoing = my_local > b; // keepgoing is a loop-carried dependency + user_defined_vals[i] = b + b; + /* End user-defined code */ + } + // my_local = 123; // Can't do this. my_local was defined in the body + + // These below values are live-out from the loop and therefore accessible + b_out; user_defined_vals; keepgoing_out; + } + + There are several things of note in this code snippet: + + 1) Values from the enclosing scope (i.e. variable a here) are in scope and can + be referenced in the inputs of the loop. + 2) Any variables which you wish to make available in the enclosing scope (i.e. + the variables b and keepgoing) must be declared as either loop-carried + dependencies (both at the op inputs and output and at the body net input and + output) or scan_outputs. + 3) Values created in the body cannot be accessed in the enclosing scope. + + Note that the semantics of this op support "diagonal" or "wavefront" execution. + (See Step 3 here for an example: + https://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/). + Frontends should emit multi-layer RNNs as a series of While operators (with + time being the inner looping dimension), with each successive layer consuming + the scan_outputs from the previous layer, possibly going through several + point-wise operators (e.g. dropout, residual connections, linear layer). 
+ + + Args: + M: (optional) A maximum trip-count for the loop specified at runtime. + Optional. Pass empty string to skip. + + cond: (optional) A boolean termination condition. Optional. Pass empty + string to skip. + + v_initial: (variadic, heterogeneous) The initial values of any loop-carried + dependencies (values that change across loop iterations) + + body: The graph run each iteration. It has 2+N inputs: (iteration_num, + condition, loop carried dependencies...). It has 1+N+K outputs: + (condition, loop carried dependencies..., scan_outputs...). Each + scan_output is created by concatenating the value of the specified + output value at the end of each iteration of the loop. It is an error if + the dimensions or data type of these scan_outputs change across loop + iterations. + """ + + schema = get_schema("Loop", 1, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Loop", schema) + return op(*self._prepare_inputs(schema, M, cond, *v_initial), body=body) + + def LpNormalization( + self, input: Union[DOUBLE, FLOAT, FLOAT16], axis: int = -1, p: int = 2 + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 LpNormalization(1)](https://onnx.ai/onnx/operators/onnx__LpNormalization.html#lpnormalization-1 "Online Documentation") + + + Given a matrix, apply Lp-normalization along the provided axis. + + + Args: + input: (differentiable) Input matrix + + axis: The axis on which to apply normalization, -1 mean last axis. + + p: The order of the normalization, only 1 or 2 are supported. + """ + + schema = get_schema("LpNormalization", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "LpNormalization", schema) + return op(*self._prepare_inputs(schema, input), axis=axis, p=p) + + def LpPool( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + auto_pad: str = "NOTSET", + kernel_shape: Optional[Sequence[int]] = None, + p: float = 2.0, + pads: Optional[Sequence[int]] = None, + strides: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 LpPool(1)](https://onnx.ai/onnx/operators/onnx__LpPool.html#lppool-1 "Online Documentation") + + + LpPool consumes an input tensor X and applies Lp pooling across the + the tensor according to kernel sizes, stride sizes, and pad lengths. + Lp pooling consisting of computing the Lp norm on all values of a subset + of the input tensor according to the kernel size and downsampling the + data into the output tensor Y for further processing. + + Args: + X: Input data tensor from the previous operator; dimensions for image case + are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. For non + image case, the dimension are in the form of (N x C x D1 x D2 ... Dn), + where N is the batch size. + + auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. + Where default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that the output size + match the input.In case of odd number add the extra padding at the end + for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no + padding. DEPRECATION NOTE: auto_pad is only intended to support legacy + uses, and for framework authors, one is explicitly encouraged to use + explicit padding specified in the pads attribute. + + kernel_shape: The size of the kernel along each axis. 
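+
+                For example, with kernel_shape = [2, 2] and the default p =
+                2.0, each output element is the L2 norm (the square root of
+                the sum of squares) of the 4 input values in its window.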
+ + p: p value of the Lp norm used to pool over the input data, default is 2.0. + + pads: Padding for the beginning and ending along each axis, it can take any + value greater than or equal to 0. The value represent the number of + pixels added to the beginning and end part of the corresponding axis. + `pads` format should be as follow [x1_begin, x2_begin...x1_end, + x2_end,...], where xi_begin the number of pixels added at the beginning + of axis `i` and xi_end, the number of pixels added at the end of axis + `i`. This attribute cannot be used simultaneously with auto_pad + attribute. + + strides: Stride along each axis. + """ + + schema = get_schema("LpPool", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "LpPool", schema) + return op( + *self._prepare_inputs(schema, X), + auto_pad=auto_pad, + kernel_shape=kernel_shape, + p=p, + pads=pads, + strides=strides, + ) + + def MatMul( + self, A: Union[DOUBLE, FLOAT, FLOAT16], B: Union[DOUBLE, FLOAT, FLOAT16] + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 MatMul(1)](https://onnx.ai/onnx/operators/onnx__MatMul.html#matmul-1 "Online Documentation") + + + Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html + + + Args: + A: N-dimensional matrix A + + B: N-dimensional matrix B + """ + + schema = get_schema("MatMul", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "MatMul", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def Max( + self, + *data_0: Union[DOUBLE, FLOAT, FLOAT16], + consumed_inputs: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Max(1)](https://onnx.ai/onnx/operators/onnx__Max.html#max-1 "Online Documentation") + + + Element-wise max of each of the input tensors. All inputs and outputs must + have the same shape and data type. + + + Args: + data_0: (variadic) List of tensors for Max. + + consumed_inputs: legacy optimization attribute. + """ + + schema = get_schema("Max", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Max", schema) + return op(*self._prepare_inputs(schema, *data_0), consumed_inputs=consumed_inputs) + + def MaxPool( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + auto_pad: str = "NOTSET", + kernel_shape: Optional[Sequence[int]] = None, + pads: Optional[Sequence[int]] = None, + strides: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 MaxPool(1)](https://onnx.ai/onnx/operators/onnx__MaxPool.html#maxpool-1 "Online Documentation") + + + MaxPool consumes an input tensor X and applies max pooling across + the tensor according to kernel sizes, stride sizes, and pad lengths. + max pooling consisting of computing the max on all values of a + subset of the input tensor according to the kernel size and downsampling the + data into the output tensor Y for further processing. The output spatial shape will be following: + ``` + output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1) + + * pad_shape[i] is sum of pads along axis i + ``` + + `auto_pad` is a DEPRECATED attribute. 
If you are still using it, the output spatial shape will be as follows:
+        ```
+        VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])
+        SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])
+        ```
+        And the pad shape will be as follows if `SAME_UPPER` or `SAME_LOWER`:
+        ```
+        pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]
+        ```
+        The output of each pooling window is the maximum of the elements in the
+        window, excluding padding.
+
+
+        Args:
+            X: Input data tensor from the previous operator; dimensions for image case
+                are (N x C x H x W), where N is the batch size, C is the number of
+                channels, and H and W are the height and the width of the data. For non
+                image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn),
+                where N is the batch size. Optionally, if dimension denotation is in
+                effect, the operation expects the input data tensor to arrive with the
+                dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE,
+                DATA_FEATURE ...].
+
+            auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID.
+                The default value is NOTSET, which means explicit padding is used.
+                SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial
+                size matches the input. In case of an odd number, add the extra padding
+                at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID
+                means no padding.
+
+            kernel_shape: The size of the kernel along each axis.
+
+            pads: Padding for the beginning and ending along each spatial axis; it can
+                take any value greater than or equal to 0. The values represent the
+                number of pixels added to the beginning and end part of the
+                corresponding axis. The `pads` format should be as follows: [x1_begin,
+                x2_begin, ..., x1_end, x2_end, ...], where xi_begin is the number of
+                pixels added at the beginning of axis `i` and xi_end the number of
+                pixels added at the end of axis `i`. This attribute cannot be used
+                simultaneously with the auto_pad attribute. If not present, the padding
+                defaults to 0 along the start and end of each spatial axis.
+
+            strides: Stride along each spatial axis.
+        """
+
+        schema = get_schema("MaxPool", 1, "")
+        op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "MaxPool", schema)
+        return op(
+            *self._prepare_inputs(schema, X),
+            auto_pad=auto_pad,
+            kernel_shape=kernel_shape,
+            pads=pads,
+            strides=strides,
+        )
+
+    def MaxRoiPool(
+        self,
+        X: Union[DOUBLE, FLOAT, FLOAT16],
+        rois: Union[DOUBLE, FLOAT, FLOAT16],
+        pooled_shape: Optional[Sequence[int]] = None,
+        spatial_scale: float = 1.0,
+    ) -> Union[DOUBLE, FLOAT, FLOAT16]:
+        r"""[🌐 MaxRoiPool(1)](https://onnx.ai/onnx/operators/onnx__MaxRoiPool.html#maxroipool-1 "Online Documentation")
+
+
+        ROI max pool consumes an input tensor X and regions of interest (RoIs) to
+        apply max pooling across each RoI, producing a 4-D output tensor of shape
+        (num_rois, channels, pooled_shape[0], pooled_shape[1]).
+
+        Args:
+            X: (differentiable) Input data tensor from the previous operator; dimensions
+                for image case are (N x C x H x W), where N is the batch size, C is the
+                number of channels, and H and W are the height and the width of the
+                data.
+
+            rois: (non-differentiable) RoIs (Regions of Interest) to pool over. Should
+                be a 2-D tensor of shape (num_rois, 5) given as [[batch_id, x1, y1, x2,
+                y2], ...].
+
+            pooled_shape: ROI pool output shape (height, width).
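+
+                For example, pooled_shape = [6, 6] with R input rois produces
+                an output tensor of shape (R, C, 6, 6).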
+ + spatial_scale: Multiplicative spatial scale factor to translate ROI + coordinates from their input scale to the scale used when pooling. + """ + + schema = get_schema("MaxRoiPool", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "MaxRoiPool", schema) + return op( + *self._prepare_inputs(schema, X, rois), + pooled_shape=pooled_shape, + spatial_scale=spatial_scale, + ) + + def Mean( + self, + *data_0: Union[DOUBLE, FLOAT, FLOAT16], + consumed_inputs: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Mean(1)](https://onnx.ai/onnx/operators/onnx__Mean.html#mean-1 "Online Documentation") + + + Element-wise mean of each of the input tensors. All inputs and outputs must + have the same shape and data type. + + + Args: + data_0: (variadic) List of tensors for Mean. + + consumed_inputs: legacy optimization attribute. + """ + + schema = get_schema("Mean", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Mean", schema) + return op(*self._prepare_inputs(schema, *data_0), consumed_inputs=consumed_inputs) + + def Min( + self, + *data_0: Union[DOUBLE, FLOAT, FLOAT16], + consumed_inputs: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Min(1)](https://onnx.ai/onnx/operators/onnx__Min.html#min-1 "Online Documentation") + + + Element-wise min of each of the input tensors. All inputs and outputs must + have the same shape and data type. + + + Args: + data_0: (variadic) List of tensors for Min + + consumed_inputs: legacy optimization attribute. + """ + + schema = get_schema("Min", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Min", schema) + return op(*self._prepare_inputs(schema, *data_0), consumed_inputs=consumed_inputs) + + def Mul( + self, + A: Union[DOUBLE, FLOAT, FLOAT16], + B: Union[DOUBLE, FLOAT, FLOAT16], + axis: Optional[int] = None, + broadcast: int = 0, + consumed_inputs: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Mul(1)](https://onnx.ai/onnx/operators/onnx__Mul.html#mul-1 "Online Documentation") + + + Performs element-wise binary multiplication (with limited broadcast support). + + If necessary the right-hand-side argument will be broadcasted to match the + shape of left-hand-side argument. When broadcasting is specified, the second + tensor can either be of element size 1 (including a scalar tensor and any + tensor with rank equal to or smaller than the first tensor), or having its + shape as a contiguous subset of the first tensor's shape. The starting of the + mutually equal shape is specified by the argument "axis", and if it is not set, + suffix matching is assumed. 1-dim expansion doesn't work yet. + + For example, the following tensor shapes are supported (with broadcast=1): + + shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor + shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor + shape(A) = (2, 3, 4, 5), shape(B) = (5,) + shape(A) = (2, 3, 4, 5), shape(B) = (4, 5) + shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1 + shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0 + + Attribute `broadcast=1` needs to be passed to enable broadcasting. + + + Args: + A: First operand, should share the type with the second operand. + + B: Second operand. With broadcasting can be of smaller size than A. If + broadcasting is disabled it should be of the same size. + + axis: If set, defines the broadcast dimensions. See doc for details. 
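+
+                For example, with broadcast=1 and B a 1-element tensor, every
+                element of A is multiplied by that single value.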
+
+            broadcast: Pass 1 to enable broadcasting
+
+            consumed_inputs: legacy optimization attribute.
+        """
+
+        schema = get_schema("Mul", 1, "")
+        op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Mul", schema)
+        return op(
+            *self._prepare_inputs(schema, A, B),
+            axis=axis,
+            broadcast=broadcast,
+            consumed_inputs=consumed_inputs,
+        )
+
+    def Neg(
+        self, X: Union[DOUBLE, FLOAT, FLOAT16], consumed_inputs: Optional[Sequence[int]] = None
+    ) -> Union[DOUBLE, FLOAT, FLOAT16]:
+        r"""[🌐 Neg(1)](https://onnx.ai/onnx/operators/onnx__Neg.html#neg-1 "Online Documentation")
+
+
+        Neg takes one input data (Tensor) and produces one output data
+        (Tensor) where the negation, y = -x, is applied to
+        the tensor elementwise.
+
+
+        Args:
+            X: Input tensor
+
+            consumed_inputs: legacy optimization attribute.
+        """
+
+        schema = get_schema("Neg", 1, "")
+        op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Neg", schema)
+        return op(*self._prepare_inputs(schema, X), consumed_inputs=consumed_inputs)
+
+    def Not(self, X: BOOL) -> BOOL:
+        r"""[🌐 Not(1)](https://onnx.ai/onnx/operators/onnx__Not.html#not-1 "Online Documentation")
+
+
+        Returns the negation of the input tensor element-wise.
+
+
+        Args:
+            X: (non-differentiable) Input tensor
+        """
+
+        schema = get_schema("Not", 1, "")
+        op: Callable[..., BOOL] = Op(self, "Not", schema)
+        return op(*self._prepare_inputs(schema, X))
+
+    def Or(self, A: BOOL, B: BOOL, axis: Optional[int] = None, broadcast: int = 0) -> BOOL:
+        r"""[🌐 Or(1)](https://onnx.ai/onnx/operators/onnx__Or.html#or-1 "Online Documentation")
+
+
+        Returns the tensor resulted from performing the `or` logical operation
+        elementwise on the input tensors `A` and `B`.
+
+        If broadcasting is enabled, the right-hand-side argument will be broadcasted
+        to match the shape of left-hand-side argument. See the doc of `Add` for a
+        detailed description of the broadcasting rules.
+
+
+        Args:
+            A: Left input tensor for the logical operator.
+
+            B: Right input tensor for the logical operator.
+
+            axis: If set, defines the broadcast dimensions.
+
+            broadcast: Enable broadcasting
+        """
+
+        schema = get_schema("Or", 1, "")
+        op: Callable[..., BOOL] = Op(self, "Or", schema)
+        return op(*self._prepare_inputs(schema, A, B), axis=axis, broadcast=broadcast)
+
+    def PRelu(
+        self,
+        X: Union[DOUBLE, FLOAT, FLOAT16],
+        slope: Union[DOUBLE, FLOAT, FLOAT16],
+        consumed_inputs: Optional[Sequence[int]] = None,
+    ) -> Union[DOUBLE, FLOAT, FLOAT16]:
+        r"""[🌐 PRelu(1)](https://onnx.ai/onnx/operators/onnx__PRelu.html#prelu-1 "Online Documentation")
+
+
+
+        PRelu takes input data (Tensor) and slope tensor as input, and produces one
+        output data (Tensor) where the function `f(x) = slope * x for x < 0`,
+        `f(x) = x for x >= 0`, is applied to the data tensor elementwise.
+
+
+
+        Args:
+            X: Input tensor
+
+            slope: Slope tensor. If `slope` is of size 1, the value is shared across
+                different channels.
+
+            consumed_inputs: legacy optimization attribute.
+        """
+
+        schema = get_schema("PRelu", 1, "")
+        op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "PRelu", schema)
+        return op(*self._prepare_inputs(schema, X, slope), consumed_inputs=consumed_inputs)
+
+    def Pad(
+        self,
+        data: Union[DOUBLE, FLOAT, FLOAT16],
+        mode: str = "constant",
+        paddings: Optional[Sequence[int]] = None,
+        value: float = 0.0,
+    ) -> Union[DOUBLE, FLOAT, FLOAT16]:
+        r"""[🌐 Pad(1)](https://onnx.ai/onnx/operators/onnx__Pad.html#pad-1 "Online Documentation")
+
+
+        Given a `data` tensor, `paddings`, `mode`, and `value`, produces an output
+        tensor padded accordingly.
+ Example: + Insert 0 paddings to the beginning of the second dimension. + data = [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] + paddings = [0, 0, 2, 0] + output = [ + [ + [0.0, 0.0, 1.0, 1.2], + [0.0, 0.0, 2.3, 3.4], + [0.0, 0.0, 4.5, 5.7], + ], + ] + + + Args: + data: Input tensor. + + mode: Three modes: constant(default), reflect, edge + + paddings: List of integers indicate the padding element count at the + beginning and end of each axis, for 2D it is the number of pixel. + `paddings` rank should be double of the input's rank. `paddings` format + should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where + xi_begin the number of pixels added at the beginning of axis `i` and + xi_end, the number of pixels added at the end of axis `i`. + + value: One float, indicates the value to be filled, default is 0 + """ + + schema = get_schema("Pad", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Pad", schema) + return op( + *self._prepare_inputs(schema, data), mode=mode, paddings=paddings, value=value + ) + + def Pow( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + Y: Union[DOUBLE, FLOAT, FLOAT16], + axis: Optional[int] = None, + broadcast: int = 0, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Pow(1)](https://onnx.ai/onnx/operators/onnx__Pow.html#pow-1 "Online Documentation") + + + Pow takes input data (Tensor) and exponent Tensor, and + produces one output data (Tensor) where the function `f(x) = x^exponent`, + is applied to the data tensor elementwise. + + If necessary the right-hand-side argument will be broadcasted to match the + shape of left-hand-side argument. When broadcasting is specified, the second + tensor can either be of element size 1 (including a scalar tensor and any + tensor with rank equal to or smaller than the first tensor), or having its + shape as a contiguous subset of the first tensor's shape. The starting of the + mutually equal shape is specified by the argument "axis", and if it is not set, + suffix matching is assumed. 1-dim expansion doesn't work yet. + + For example, the following tensor shapes are supported (with broadcast=1): + + shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor + shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor + shape(A) = (2, 3, 4, 5), shape(B) = (5,) + shape(A) = (2, 3, 4, 5), shape(B) = (4, 5) + shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1 + shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0 + + Attribute `broadcast=1` needs to be passed to enable broadcasting. + + + Args: + X: Input tensor of any shape, base of the exponent. + + Y: Input tensor of any shape broadcastable to X shape, the exponent + component. + + axis: If set, defines the broadcast dimensions. See doc for details. 
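+
+                For example, with broadcast=1 and Y a scalar tensor holding
+                2.0, each element x of X is mapped to x^2; no axis needs to be
+                set for a scalar exponent.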
+ + broadcast: Pass 1 to enable broadcasting + """ + + schema = get_schema("Pow", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Pow", schema) + return op(*self._prepare_inputs(schema, X, Y), axis=axis, broadcast=broadcast) + + def RNN( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + W: Union[DOUBLE, FLOAT, FLOAT16], + R: Union[DOUBLE, FLOAT, FLOAT16], + B: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + sequence_lens: Optional[INT32] = None, + initial_h: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + activation_alpha: Optional[Sequence[float]] = None, + activation_beta: Optional[Sequence[float]] = None, + activations: Sequence[str] = ("Tanh", "Tanh"), + clip: Optional[float] = None, + direction: str = "forward", + hidden_size: Optional[int] = None, + output_sequence: int = 0, + ) -> Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]]: + r"""[🌐 RNN(1)](https://onnx.ai/onnx/operators/onnx__RNN.html#rnn-1 "Online Documentation") + + + Computes an one-layer simple RNN. This operator is usually supported + via some custom implementation such as CuDNN. + + Notations: + + `X` - input tensor + + `i` - input gate + + `t` - time step (t-1 means previous time step) + + `Wi` - W parameter weight matrix for input gate + + `Ri` - R recurrence weight matrix for input gate + + `Wbi` - W parameter bias vector for input gate + + `Rbi` - R parameter bias vector for input gate + + `WBi` - W parameter weight matrix for backward input gate + + `RBi` - R recurrence weight matrix for backward input gate + + `WBbi` - WR bias vectors for backward input gate + + `RBbi` - RR bias vectors for backward input gate + + `H` - Hidden state + + `num_directions` - 2 if direction == bidirectional else 1 + + Activation functions: + + Relu(x) - max(0, x) + + Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) + + Sigmoid(x) - 1/(1 + e^{-x}) + + (NOTE: Below are optional) + + Affine(x) - alpha*x + beta + + LeakyRelu(x) - x if x >= 0 else alpha * x + + ThresholdedRelu(x) - x if x >= alpha else 0 + + ScaledTanh(x) - alpha*Tanh(beta*x) + + HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) + + Elu(x) - x if x >= 0 else alpha*(e^x - 1) + + Softsign(x) - x/(1 + |x|) + + Softplus(x) - log(1 + e^x) + + Equations (Default: f=Tanh): + + - Ht = f(Xt*(Wi^T) + Ht-1*Ri + Wbi + Rbi) + + + Args: + X: The input sequences packed (and potentially padded) into one 3-D tensor + with the shape of `[seq_length, batch_size, input_size]`. + + W: The weight tensor for input gate. Concatenation of `Wi` and `WBi` (if + bidirectional). The tensor has shape `[num_directions, hidden_size, + input_size]`. + + R: The recurrence weight tensor. Concatenation of `Ri` and `RBi` (if + bidirectional). The tensor has shape `[num_directions, hidden_size, + hidden_size]`. + + B: (optional) The bias tensor for input gate. Concatenation of `[Wbi, Rbi]` + and `[WBbi, RBbi]` (if bidirectional). The tensor has shape + `[num_directions, 2*hidden_size]`. Optional: If not specified - assumed + to be 0. + + sequence_lens: (optional) Optional tensor specifying lengths of the + sequences in a batch. If not specified - assumed all sequences in the + batch to have length `seq_length`. It has shape `[batch_size]`. + + initial_h: (optional) Optional initial value of the hidden. If not specified + - assumed to be 0. It has shape `[num_directions, batch_size, + hidden_size]`. + + activation_alpha: Optional scaling values used by some activation functions. + The values are consumed in the order of activation functions, for + example (f, g, h) in LSTM. 
Default values are the same as of + corresponding ONNX operators.For example with LeakyRelu, the default + alpha is 0.01. + + activation_beta: Optional scaling values used by some activation functions. + The values are consumed in the order of activation functions, for + example (f, g, h) in LSTM. Default values are the same as of + corresponding ONNX operators. + + activations: One (or two if bidirectional) activation function for input + gate. The activation function must be one of the activation functions + specified above. Optional: Default `Tanh` if not specified. + + clip: Cell clip threshold. Clipping bounds the elements of a tensor in the + range of [-threshold, +threshold] and is applied to the input of + activations. No clip if not specified. + + direction: Specify if the RNN is forward, reverse, or bidirectional. Must be + one of forward (default), reverse, or bidirectional. + + hidden_size: Number of neurons in the hidden layer + + output_sequence: The sequence output for the hidden is optional if 0. + Default 0. + """ + + schema = get_schema("RNN", 1, "") + op: Callable[ + ..., Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]] + ] = Op(self, "RNN", schema) + return op( + *self._prepare_inputs(schema, X, W, R, B, sequence_lens, initial_h), + activation_alpha=activation_alpha, + activation_beta=activation_beta, + activations=activations, + clip=clip, + direction=direction, + hidden_size=hidden_size, + output_sequence=output_sequence, + ) + + def RandomNormal( + self, + dtype: int = 1, + mean: float = 0.0, + scale: float = 1.0, + seed: Optional[float] = None, + shape: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 RandomNormal(1)](https://onnx.ai/onnx/operators/onnx__RandomNormal.html#randomnormal-1 "Online Documentation") + + + Generate a tensor with random values drawn from a normal distribution. The shape + of the tensor is specified by the `shape` argument and the parameter of the normal distribution + specified by `mean` and `scale`. + + The data type is specified by the 'dtype' argument. The 'dtype' argument must + be one of the data types specified in the 'DataType' enum field in the + TensorProto message. + + + Args: + dtype: The data type for the elements of the output tensor. Default is + TensorProto::FLOAT. + + mean: The mean of the normal distribution. + + scale: The standard deviation of the normal distribution. + + seed: (Optional) Seed to the random generator, if not specified we will auto + generate one. + + shape: The shape of the output tensor. + """ + + schema = get_schema("RandomNormal", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "RandomNormal", schema) + return op(dtype=dtype, mean=mean, scale=scale, seed=seed, shape=shape) + + def RandomNormalLike( + self, + input: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + dtype: Optional[int] = None, + mean: float = 0.0, + scale: float = 1.0, + seed: Optional[float] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 RandomNormalLike(1)](https://onnx.ai/onnx/operators/onnx__RandomNormalLike.html#randomnormallike-1 "Online Documentation") + + + Generate a tensor with random values drawn from a normal distribution. + The shape of the output tensor is copied from the shape of the input tensor, + and the parameters of the normal distribution are specified by `mean` and `scale`. 
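+
+        For example, an input of shape (2, 3) yields an output of shape (2, 3),
+        with each element drawn from a normal distribution with the given
+        `mean` and standard deviation `scale`.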
+ + The data type is specified by the 'dtype' argument, or copied from the input tensor if not provided. + The 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the + TensorProto message, and be valid as an output type. + + + Args: + input: Input tensor to copy shape and optionally type information from. + + dtype: (Optional) The data type for the elements of the output tensor, if + not specified, we will use the data type of the input tensor. + + mean: The mean of the normal distribution. + + scale: The standard deviation of the normal distribution. + + seed: (Optional) Seed to the random generator, if not specified we will auto + generate one. + """ + + schema = get_schema("RandomNormalLike", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "RandomNormalLike", schema) + return op( + *self._prepare_inputs(schema, input), + dtype=dtype, + mean=mean, + scale=scale, + seed=seed, + ) + + def RandomUniform( + self, + dtype: int = 1, + high: float = 1.0, + low: float = 0.0, + seed: Optional[float] = None, + shape: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 RandomUniform(1)](https://onnx.ai/onnx/operators/onnx__RandomUniform.html#randomuniform-1 "Online Documentation") + + + Generate a tensor with random values drawn from a uniform distribution. The shape + of the tensor is specified by the `shape` argument and the range by `low` and `high`. + + The data type is specified by the 'dtype' argument. The 'dtype' argument must + be one of the data types specified in the 'DataType' enum field in the + TensorProto message. + + + Args: + dtype: The data type for the elements of the output tensor. If not + specified, default is TensorProto::FLOAT. + + high: Upper boundary of the output values. + + low: Lower boundary of the output values. + + seed: (Optional) Seed to the random generator, if not specified we will auto + generate one. + + shape: The shape of the output tensor. + """ + + schema = get_schema("RandomUniform", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "RandomUniform", schema) + return op(dtype=dtype, high=high, low=low, seed=seed, shape=shape) + + def RandomUniformLike( + self, + input: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + dtype: Optional[int] = None, + high: float = 1.0, + low: float = 0.0, + seed: Optional[float] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 RandomUniformLike(1)](https://onnx.ai/onnx/operators/onnx__RandomUniformLike.html#randomuniformlike-1 "Online Documentation") + + + Generate a tensor with random values drawn from a uniform distribution. + The shape of the output tensor is copied from the shape of the input tensor, + and the parameters of the uniform distribution are specified by `low` and `high`. + + The data type is specified by the 'dtype' argument, or copied from the input tensor if not provided. + The 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the + TensorProto message and be valid as an output type. + + + Args: + input: Input tensor to copy shape and optionally type information from. + + dtype: (Optional) The data type for the elements of the output tensor, if + not specified, we will use the data type of the input tensor. + + high: Upper boundary of the output values. + + low: Lower boundary of the output values. 
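+
+                For example, with the defaults low = 0.0 and high = 1.0, each
+                output element is drawn uniformly between 0 and 1.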
+
+            seed: (Optional) Seed to the random generator, if not specified we will auto
+                generate one.
+        """
+
+        schema = get_schema("RandomUniformLike", 1, "")
+        op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(
+            self, "RandomUniformLike", schema
+        )
+        return op(
+            *self._prepare_inputs(schema, input), dtype=dtype, high=high, low=low, seed=seed
+        )
+
+    def Reciprocal(
+        self, X: Union[DOUBLE, FLOAT, FLOAT16], consumed_inputs: Optional[Sequence[int]] = None
+    ) -> Union[DOUBLE, FLOAT, FLOAT16]:
+        r"""[🌐 Reciprocal(1)](https://onnx.ai/onnx/operators/onnx__Reciprocal.html#reciprocal-1 "Online Documentation")
+
+
+        Reciprocal takes one input data (Tensor) and produces one output data
+        (Tensor) where the reciprocal, y = 1/x, is applied to
+        the tensor elementwise.
+
+
+        Args:
+            X: Input tensor
+
+            consumed_inputs: legacy optimization attribute.
+        """
+
+        schema = get_schema("Reciprocal", 1, "")
+        op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Reciprocal", schema)
+        return op(*self._prepare_inputs(schema, X), consumed_inputs=consumed_inputs)
+
+    def ReduceL1(
+        self,
+        data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+        axes: Optional[Sequence[int]] = None,
+        keepdims: int = 1,
+    ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]:
+        r"""[🌐 ReduceL1(1)](https://onnx.ai/onnx/operators/onnx__ReduceL1.html#reducel1-1 "Online Documentation")
+
+
+        Computes the L1 norm of the input tensor's elements along the provided axes. The resulting
+        tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then
+        the resulting tensor has the reduced dimension pruned.
+
+        The above behavior is similar to numpy, with the exception that numpy defaults keepdims to
+        False instead of True.
+
+        Args:
+            data: An input tensor.
+
+            axes: A list of integers, along which to reduce. The default is to reduce
+                over all the dimensions of the input tensor.
+
+            keepdims: Keep the reduced dimension or not, default 1 means keep reduced
+                dimension.
+        """
+
+        schema = get_schema("ReduceL1", 1, "")
+        op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op(
+            self, "ReduceL1", schema
+        )
+        return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims)
+
+    def ReduceL2(
+        self,
+        data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+        axes: Optional[Sequence[int]] = None,
+        keepdims: int = 1,
+    ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]:
+        r"""[🌐 ReduceL2(1)](https://onnx.ai/onnx/operators/onnx__ReduceL2.html#reducel2-1 "Online Documentation")
+
+
+        Computes the L2 norm of the input tensor's elements along the provided axes. The resulting
+        tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then
+        the resulting tensor has the reduced dimension pruned.
+
+        The above behavior is similar to numpy, with the exception that numpy defaults keepdims to
+        False instead of True.
+
+        Args:
+            data: An input tensor.
+
+            axes: A list of integers, along which to reduce. The default is to reduce
+                over all the dimensions of the input tensor.
+
+            keepdims: Keep the reduced dimension or not, default 1 means keep reduced
+                dimension.
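+
+        For example, for axes = [1] and an input of shape [3, 4, 5], the output
+        has shape [3, 1, 5] with keepdims = 1 and shape [3, 5] with keepdims =
+        0; each output element is the square root of the sum of squares over
+        the reduced axis.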
+        """
+
+        schema = get_schema("ReduceL2", 1, "")
+        op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op(
+            self, "ReduceL2", schema
+        )
+        return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims)
+
+    def ReduceLogSum(
+        self,
+        data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+        axes: Optional[Sequence[int]] = None,
+        keepdims: int = 1,
+    ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]:
+        r"""[🌐 ReduceLogSum(1)](https://onnx.ai/onnx/operators/onnx__ReduceLogSum.html#reducelogsum-1 "Online Documentation")
+
+
+        Computes the log sum of the input tensor's elements along the provided axes. The resulting
+        tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then
+        the resulting tensor has the reduced dimension pruned.
+
+        The above behavior is similar to numpy, with the exception that numpy defaults keepdims to
+        False instead of True.
+
+        Args:
+            data: An input tensor.
+
+            axes: A list of integers, along which to reduce. The default is to reduce
+                over all the dimensions of the input tensor.
+
+            keepdims: Keep the reduced dimension or not, default 1 means keep reduced
+                dimension.
+        """
+
+        schema = get_schema("ReduceLogSum", 1, "")
+        op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op(
+            self, "ReduceLogSum", schema
+        )
+        return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims)
+
+    def ReduceLogSumExp(
+        self,
+        data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+        axes: Optional[Sequence[int]] = None,
+        keepdims: int = 1,
+    ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]:
+        r"""[🌐 ReduceLogSumExp(1)](https://onnx.ai/onnx/operators/onnx__ReduceLogSumExp.html#reducelogsumexp-1 "Online Documentation")
+
+
+        Computes the log sum exponent of the input tensor's elements along the provided axes. The resulting
+        tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then
+        the resulting tensor has the reduced dimension pruned.
+
+        The above behavior is similar to numpy, with the exception that numpy defaults keepdims to
+        False instead of True.
+
+        Args:
+            data: An input tensor.
+
+            axes: A list of integers, along which to reduce. The default is to reduce
+                over all the dimensions of the input tensor.
+
+            keepdims: Keep the reduced dimension or not, default 1 means keep reduced
+                dimension.
+        """
+
+        schema = get_schema("ReduceLogSumExp", 1, "")
+        op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op(
+            self, "ReduceLogSumExp", schema
+        )
+        return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims)
+
+    def ReduceMax(
+        self,
+        data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+        axes: Optional[Sequence[int]] = None,
+        keepdims: int = 1,
+    ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]:
+        r"""[🌐 ReduceMax(1)](https://onnx.ai/onnx/operators/onnx__ReduceMax.html#reducemax-1 "Online Documentation")
+
+
+        Computes the max of the input tensor's elements along the provided axes. The resulting
+        tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then
+        the resulting tensor has the reduced dimension pruned.
+
+        The above behavior is similar to numpy, with the exception that numpy defaults keepdims to
+        False instead of True.
+
+        Args:
+            data: An input tensor.
+
+            axes: A list of integers, along which to reduce. The default is to reduce
+                over all the dimensions of the input tensor.
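+
+                For example, with axes = [0, 2] and an input of shape [2, 3,
+                4], the max is taken over dimensions 0 and 2, producing shape
+                [1, 3, 1] under the default keepdims = 1.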
+ + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + """ + + schema = get_schema("ReduceMax", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op( + self, "ReduceMax", schema + ) + return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims) + + def ReduceMean( + self, + data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axes: Optional[Sequence[int]] = None, + keepdims: int = 1, + ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 ReduceMean(1)](https://onnx.ai/onnx/operators/onnx__ReduceMean.html#reducemean-1 "Online Documentation") + + + Computes the mean of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then + the resulted tensor have the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: An input tensor. + + axes: A list of integers, along which to reduce. The default is to reduce + over all the dimensions of the input tensor. + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + """ + + schema = get_schema("ReduceMean", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op( + self, "ReduceMean", schema + ) + return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims) + + def ReduceMin( + self, + data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axes: Optional[Sequence[int]] = None, + keepdims: int = 1, + ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 ReduceMin(1)](https://onnx.ai/onnx/operators/onnx__ReduceMin.html#reducemin-1 "Online Documentation") + + + Computes the min of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then + the resulted tensor have the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: An input tensor. + + axes: A list of integers, along which to reduce. The default is to reduce + over all the dimensions of the input tensor. + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + """ + + schema = get_schema("ReduceMin", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op( + self, "ReduceMin", schema + ) + return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims) + + def ReduceProd( + self, + data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axes: Optional[Sequence[int]] = None, + keepdims: int = 1, + ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 ReduceProd(1)](https://onnx.ai/onnx/operators/onnx__ReduceProd.html#reduceprod-1 "Online Documentation") + + + Computes the product of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then + the resulted tensor have the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: An input tensor. + + axes: A list of integers, along which to reduce. 
The default is to reduce + over all the dimensions of the input tensor. + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + """ + + schema = get_schema("ReduceProd", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op( + self, "ReduceProd", schema + ) + return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims) + + def ReduceSum( + self, + data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axes: Optional[Sequence[int]] = None, + keepdims: int = 1, + ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 ReduceSum(1)](https://onnx.ai/onnx/operators/onnx__ReduceSum.html#reducesum-1 "Online Documentation") + + + Computes the sum of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then + the resulted tensor have the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: An input tensor. + + axes: A list of integers, along which to reduce. The default is to reduce + over all the dimensions of the input tensor. + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + """ + + schema = get_schema("ReduceSum", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op( + self, "ReduceSum", schema + ) + return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims) + + def ReduceSumSquare( + self, + data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axes: Optional[Sequence[int]] = None, + keepdims: int = 1, + ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 ReduceSumSquare(1)](https://onnx.ai/onnx/operators/onnx__ReduceSumSquare.html#reducesumsquare-1 "Online Documentation") + + + Computes the sum square of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then + the resulted tensor have the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: An input tensor. + + axes: A list of integers, along which to reduce. The default is to reduce + over all the dimensions of the input tensor. + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + """ + + schema = get_schema("ReduceSumSquare", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op( + self, "ReduceSumSquare", schema + ) + return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims) + + def Relu( + self, X: Union[DOUBLE, FLOAT, FLOAT16], consumed_inputs: Optional[Sequence[int]] = None + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Relu(1)](https://onnx.ai/onnx/operators/onnx__Relu.html#relu-1 "Online Documentation") + + + Relu takes one input data (Tensor) and produces one output data + (Tensor) where the rectified linear function, y = max(0, x), is applied to + the tensor elementwise. + + + Args: + X: Input tensor + + consumed_inputs: legacy optimization attribute. 
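Two quick numpy identities for the stubs above, illustrative only: ReduceSumSquare decomposes into a ReduceSum of an elementwise square, and Relu(1) is just the rectifier from its docstring:

```python
import numpy as np

x = np.random.randn(2, 3).astype(np.float32)

# ReduceSumSquare(x, axes=[1]) behaves like ReduceSum(Mul(x, x), axes=[1]):
assert np.allclose(np.sum(np.square(x), axis=1, keepdims=True),
                   np.sum(x * x, axis=1, keepdims=True))

# Relu(1): y = max(0, x), applied elementwise.
y = np.maximum(x, 0.0)
assert (y >= 0).all()
```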
+ """ + + schema = get_schema("Relu", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Relu", schema) + return op(*self._prepare_inputs(schema, X), consumed_inputs=consumed_inputs) + + def Reshape( + self, + data: Union[DOUBLE, FLOAT, FLOAT16], + consumed_inputs: Optional[Sequence[int]] = None, + shape: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Reshape(1)](https://onnx.ai/onnx/operators/onnx__Reshape.html#reshape-1 "Online Documentation") + + + Reshape the input tensor similar to numpy.reshape. + It takes a tensor as input and an argument `shape`. It outputs the reshaped tensor. + At most one dimension of the new shape can be -1. In this case, the value is + inferred from the size of the tensor and the remaining dimensions. A dimension + could also be 0, in which case the actual dimension value is unchanged (i.e. taken + from the input tensor). Shape (second input) could be an empty shape, which means converting to a scalar. + The input tensor's shape and the output tensor's shape are required to have the same number of elements. + + Args: + data: An input tensor. + + consumed_inputs: legacy optimization attribute. + + shape: New shape + """ + + schema = get_schema("Reshape", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Reshape", schema) + return op( + *self._prepare_inputs(schema, data), consumed_inputs=consumed_inputs, shape=shape + ) + + def Selu( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + alpha: float = 1.673200011253357, + consumed_inputs: Optional[Sequence[int]] = None, + gamma: float = 1.0506999492645264, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Selu(1)](https://onnx.ai/onnx/operators/onnx__Selu.html#selu-1 "Online Documentation") + + + Selu takes one input data (Tensor) and produces one output data + (Tensor) where the scaled exponential linear unit function, + `y = gamma * (alpha * e^x - alpha) for x <= 0`, `y = gamma * x for x > 0`, + is applied to the tensor elementwise. + + + Args: + X: Input tensor + + alpha: Coefficient of SELU default to 1.6732. + + consumed_inputs: legacy optimization attribute. + + gamma: Coefficient of SELU default to 1.0507. + """ + + schema = get_schema("Selu", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Selu", schema) + return op( + *self._prepare_inputs(schema, X), + alpha=alpha, + consumed_inputs=consumed_inputs, + gamma=gamma, + ) + + def Shape( + self, + data: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> INT64: + r"""[🌐 Shape(1)](https://onnx.ai/onnx/operators/onnx__Shape.html#shape-1 "Online Documentation") + + + Takes a tensor as input and outputs an 1D int64 tensor containing the shape of the input tensor. + + + Args: + data: An input tensor. + """ + + schema = get_schema("Shape", 1, "") + op: Callable[..., INT64] = Op(self, "Shape", schema) + return op(*self._prepare_inputs(schema, data)) + + def Sigmoid( + self, X: Union[DOUBLE, FLOAT, FLOAT16], consumed_inputs: Optional[Sequence[int]] = None + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Sigmoid(1)](https://onnx.ai/onnx/operators/onnx__Sigmoid.html#sigmoid-1 "Online Documentation") + + + Sigmoid takes one input data (Tensor) and produces one output data + (Tensor) where the sigmoid function, y = 1 / (1 + exp(-x)), is applied to the + tensor elementwise. + + + Args: + X: Input tensor + + consumed_inputs: legacy optimization attribute. 
+ """ + + schema = get_schema("Sigmoid", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Sigmoid", schema) + return op(*self._prepare_inputs(schema, X), consumed_inputs=consumed_inputs) + + def Size( + self, + data: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> INT64: + r"""[🌐 Size(1)](https://onnx.ai/onnx/operators/onnx__Size.html#size-1 "Online Documentation") + + + Takes a tensor as input and outputs a int64 scalar that equals to the total number of elements of the input tensor. + + + Args: + data: An input tensor. + """ + + schema = get_schema("Size", 1, "") + op: Callable[..., INT64] = Op(self, "Size", schema) + return op(*self._prepare_inputs(schema, data)) + + def Slice( + self, + data: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axes: Optional[Sequence[int]] = None, + ends: Optional[Sequence[int]] = None, + starts: Optional[Sequence[int]] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Slice(1)](https://onnx.ai/onnx/operators/onnx__Slice.html#slice-1 "Online Documentation") + + + Produces a slice of the input tensor along multiple axes. Similar to numpy: + https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html + Slices uses `axes`, `starts` and `ends` attributes to specify the start and end + dimension for each axis in the list of axes, it uses this information to + slice the input `data` tensor. If a negative value is passed for any of the + start or end indices, it represent number of elements before the end of that + dimension. If the value passed to start or end is larger than the `n` (the + number of elements in this dimension), it represents `n`. For slicing to the + end of a dimension with unknown size, it is recommended to pass in `INT_MAX`. + If `axes` are omitted, they are set to `[0, ..., ndim-1]`. + Example 1: + data = [ + [1, 2, 3, 4], + [5, 6, 7, 8], + ] + axes = [0, 1] + starts = [1, 0] + ends = [2, 3] + result = [ + [5, 6, 7], + ] + Example 2: + data = [ + [1, 2, 3, 4], + [5, 6, 7, 8], + ] + starts = [0, 1] + ends = [-1, 1000] + result = [ + [2, 3, 4], + ] + + + Args: + data: Tensor of data to extract slices from. + + axes: Axes that `starts` and `ends` apply to. It's optional. If not present, + will be treated as [0, 1, ..., len(`starts`) - 1]. + + ends: Ending indices (exclusive) of corresponding axis in axes` + + starts: Starting indices of corresponding axis in `axes` + """ + + schema = get_schema("Slice", 1, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Slice", schema) + return op(*self._prepare_inputs(schema, data), axes=axes, ends=ends, starts=starts) + + def Softmax( + self, input: Union[DOUBLE, FLOAT, FLOAT16], axis: int = 1 + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Softmax(1)](https://onnx.ai/onnx/operators/onnx__Softmax.html#softmax-1 "Online Documentation") + + + The operator computes the softmax (normalized exponential) values for each layer in the batch + of the given input. The input is a 2-D tensor (Tensor) of size + (batch_size x input_feature_dimensions). 
The output tensor has the same shape + and contains the softmax values of the corresponding input. + + Input does not need to explicitly be a 2D vector; rather, it will be + coerced into one. For an arbitrary n-dimensional tensor + input \in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}] and k is + the axis provided, then input will be coerced into a 2-dimensional tensor with + dimensions [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]. For the default + case where axis=1, this means the input tensor will be coerced into a 2D tensor + of dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size. + In this situation, we must have a_0 = N and a_1 * ... * a_{n-1} = D. + Each of these dimensions must be matched correctly, or else the operator + will throw errors. + + + Args: + input: The input tensor that's coerced into a 2D matrix of size (NxD) as + described above. + + axis: Describes the axis of the inputs when coerced to 2D; defaults to one + because the 0th axis most likely describes the batch_size + """ + + schema = get_schema("Softmax", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Softmax", schema) + return op(*self._prepare_inputs(schema, input), axis=axis) + + def Softplus(self, X: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Softplus(1)](https://onnx.ai/onnx/operators/onnx__Softplus.html#softplus-1 "Online Documentation") + + + Softplus takes one input data (Tensor) and produces one output data + (Tensor) where the softplus function, y = ln(exp(x) + 1), is applied to + the tensor elementwise. + + + Args: + X: (differentiable) 1D input tensor + """ + + schema = get_schema("Softplus", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Softplus", schema) + return op(*self._prepare_inputs(schema, X)) + + def Softsign(self, input: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Softsign(1)](https://onnx.ai/onnx/operators/onnx__Softsign.html#softsign-1 "Online Documentation") + + + Calculates the softsign (x/(1+|x|)) of the given input tensor element-wise. + + + Args: + input: (differentiable) Input tensor + """ + + schema = get_schema("Softsign", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Softsign", schema) + return op(*self._prepare_inputs(schema, input)) + + def SpaceToDepth( + self, + input: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + blocksize: Optional[int] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 SpaceToDepth(1)](https://onnx.ai/onnx/operators/onnx__SpaceToDepth.html#spacetodepth-1 "Online Documentation") + + SpaceToDepth rearranges blocks of spatial data into depth. More specifically, + this op outputs a copy of the input tensor where values from the height and width dimensions + are moved to the depth dimension. + + + Args: + input: Input tensor of [N,C,H,W], where N is the batch axis, C is the + channel or depth, H is the height and W is the width. + + blocksize: Blocks of [blocksize, blocksize] are moved. 
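Softmax(1)'s coercion of an n-D input into a 2-D [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}] matrix can be mirrored directly; a hedged numpy sketch, with the function name invented:

```python
import numpy as np

def softmax_v1(x, axis=1):
    """Numpy sketch of Softmax(1): flatten to 2-D at `axis`, normalize rows."""
    shape = x.shape
    x2d = x.reshape(int(np.prod(shape[:axis])), -1)
    e = np.exp(x2d - x2d.max(axis=1, keepdims=True))  # stabilized exp
    return (e / e.sum(axis=1, keepdims=True)).reshape(shape)

y = softmax_v1(np.random.randn(2, 3, 4).astype(np.float32))
# Each coerced row (here 3 * 4 = 12 elements per batch entry) sums to 1:
assert np.allclose(y.reshape(2, -1).sum(axis=1), 1.0)
```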
+ """ + + schema = get_schema("SpaceToDepth", 1, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "SpaceToDepth", schema) + return op(*self._prepare_inputs(schema, input), blocksize=blocksize) + + def Split( + self, + input: Union[DOUBLE, FLOAT, FLOAT16], + split_: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + axis: Optional[int] = None, + split: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Split(1)](https://onnx.ai/onnx/operators/onnx__Split.html#split-1 "Online Documentation") + + Split a tensor into a list of tensors, along the specified + 'axis'. The lengths of the split can be specified using argument 'axis' or + optional second input blob to the operator. Otherwise, the tensor is split + to equal sized parts. + + + Args: + input: The tensor to split + + split_: (optional) Optional list of output lengths (see also arg 'split') + + axis: Which axis to split on + + split: length of each output + """ + + schema = get_schema("Split", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Split", schema) + return op(*self._prepare_inputs(schema, input, split_), axis=axis, split=split) + + def Sqrt( + self, X: Union[DOUBLE, FLOAT, FLOAT16], consumed_inputs: Optional[Sequence[int]] = None + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Sqrt(1)](https://onnx.ai/onnx/operators/onnx__Sqrt.html#sqrt-1 "Online Documentation") + + + Square root takes one input data (Tensor) and produces one output data + (Tensor) where the square root is, y = x^0.5, is applied to + the tensor elementwise. If x is negative, then it will return NaN. + + + Args: + X: Input tensor + + consumed_inputs: legacy optimization attribute. + """ + + schema = get_schema("Sqrt", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Sqrt", schema) + return op(*self._prepare_inputs(schema, X), consumed_inputs=consumed_inputs) + + def Squeeze( + self, + data: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axes: Optional[Sequence[int]] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Squeeze(1)](https://onnx.ai/onnx/operators/onnx__Squeeze.html#squeeze-1 "Online Documentation") + + + Remove single-dimensional entries from the shape of a tensor. + Takes a parameter `axes` with a list of axes to squeeze. + If `axes` is not provided, all the single dimensions will be removed from + the shape. If an axis is selected with shape entry not equal to one, an error is raised. + + + Args: + data: Tensors with at least max(dims) dimensions. + + axes: List of non-negative integers, indicate the dimensions to squeeze. 
+ """ + + schema = get_schema("Squeeze", 1, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Squeeze", schema) + return op(*self._prepare_inputs(schema, data), axes=axes) + + def Sub( + self, + A: Union[DOUBLE, FLOAT, FLOAT16], + B: Union[DOUBLE, FLOAT, FLOAT16], + axis: Optional[int] = None, + broadcast: int = 0, + consumed_inputs: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Sub(1)](https://onnx.ai/onnx/operators/onnx__Sub.html#sub-1 "Online Documentation") + + + Performs element-wise binary subtraction (with limited broadcast support). + + If necessary the right-hand-side argument will be broadcasted to match the + shape of left-hand-side argument. When broadcasting is specified, the second + tensor can either be of element size 1 (including a scalar tensor and any + tensor with rank equal to or smaller than the first tensor), or having its + shape as a contiguous subset of the first tensor's shape. The starting of the + mutually equal shape is specified by the argument "axis", and if it is not set, + suffix matching is assumed. 1-dim expansion doesn't work yet. + + For example, the following tensor shapes are supported (with broadcast=1): + + shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor + shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor + shape(A) = (2, 3, 4, 5), shape(B) = (5,) + shape(A) = (2, 3, 4, 5), shape(B) = (4, 5) + shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1 + shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0 + + Attribute `broadcast=1` needs to be passed to enable broadcasting. + + + Args: + A: First operand, should share the type with the second operand. + + B: Second operand. With broadcasting can be of smaller size than A. If + broadcasting is disabled it should be of the same size. + + axis: If set, defines the broadcast dimensions. See doc for details. + + broadcast: Pass 1 to enable broadcasting + + consumed_inputs: legacy optimization attribute. + """ + + schema = get_schema("Sub", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Sub", schema) + return op( + *self._prepare_inputs(schema, A, B), + axis=axis, + broadcast=broadcast, + consumed_inputs=consumed_inputs, + ) + + def Sum( + self, + *data_0: Union[DOUBLE, FLOAT, FLOAT16], + consumed_inputs: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Sum(1)](https://onnx.ai/onnx/operators/onnx__Sum.html#sum-1 "Online Documentation") + + + Element-wise sum of each of the input tensors. All inputs and outputs must + have the same shape and data type. + + + Args: + data_0: (variadic) List of tensors for Sum. + + consumed_inputs: legacy optimization attribute. + """ + + schema = get_schema("Sum", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Sum", schema) + return op(*self._prepare_inputs(schema, *data_0), consumed_inputs=consumed_inputs) + + def Tanh( + self, + input: Union[DOUBLE, FLOAT, FLOAT16], + consumed_inputs: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Tanh(1)](https://onnx.ai/onnx/operators/onnx__Tanh.html#tanh-1 "Online Documentation") + + + Calculates the hyperbolic tangent of the given input tensor element-wise. + + + Args: + input: 1-D input tensor + + consumed_inputs: legacy optimization attribute. 
+ """ + + schema = get_schema("Tanh", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Tanh", schema) + return op(*self._prepare_inputs(schema, input), consumed_inputs=consumed_inputs) + + def Tile( + self, + input: Union[DOUBLE, FLOAT, FLOAT16], + tiles: Union[DOUBLE, FLOAT, FLOAT16], + axis: Union[DOUBLE, FLOAT, FLOAT16], + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Tile(1)](https://onnx.ai/onnx/operators/onnx__Tile.html#tile-1 "Online Documentation") + + Repeat the elements of a tensor along an axis. + + Args: + input: Input tensor of any shape. + + tiles: Number of repeated copies to make of the input tensor. + + axis: Axis along which to repeat. + """ + + schema = get_schema("Tile", 1, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Tile", schema) + return op(*self._prepare_inputs(schema, input, tiles, axis)) + + def TopK( + self, X: Union[DOUBLE, FLOAT, FLOAT16], axis: int = -1, k: Optional[int] = None + ) -> Tuple[Union[DOUBLE, FLOAT, FLOAT16], INT64]: + r"""[🌐 TopK(1)](https://onnx.ai/onnx/operators/onnx__TopK.html#topk-1 "Online Documentation") + + + Retrieve the top-K elements along a specified axis. Given an input tensor of + shape [a_1, a_2, ..., a_n, r] and integer argument k, return two outputs: + -Value tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] + which contains the values of the top k elements along the specified axis + -Index tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] which + contains the indices of the top k elements (original indices from the input + tensor). + Given two equivalent values, this operator uses the indices along the axis as + a tiebreaker. That is, the element with the lower index will appear first. + + + Args: + X: Tensor of shape [a_1, a_2, ..., a_n, r] + + axis: Dimension on which to do the sort. + + k: Number of top elements to retrieve + """ + + schema = get_schema("TopK", 1, "") + op: Callable[..., Tuple[Union[DOUBLE, FLOAT, FLOAT16], INT64]] = Op( + self, "TopK", schema + ) + return op(*self._prepare_inputs(schema, X), axis=axis, k=k) + + def Transpose( + self, + data: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + perm: Optional[Sequence[int]] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Transpose(1)](https://onnx.ai/onnx/operators/onnx__Transpose.html#transpose-1 "Online Documentation") + + + Transpose the input tensor similar to numpy.transpose. For example, when + perm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output shape + will be (2, 1, 3). + + + Args: + data: An input tensor. + + perm: A list of integers. By default, reverse the dimensions, otherwise + permute the axes according to the values given. 
+ """ + + schema = get_schema("Transpose", 1, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Transpose", schema) + return op(*self._prepare_inputs(schema, data), perm=perm) + + def Unsqueeze( + self, + data: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axes: Optional[Sequence[int]] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Unsqueeze(1)](https://onnx.ai/onnx/operators/onnx__Unsqueeze.html#unsqueeze-1 "Online Documentation") + + + Insert single-dimensional entries to the shape of a tensor. + Takes one required argument `axes`, a list of dimensions that will be inserted. + Dimension indices in `axes` are as seen in the output tensor. For example: + Given a tensor such that tensor with shape [3, 4, 5], then + Unsqueeze(tensor, axes=[0, 4]) has shape [1, 3, 4, 5, 1] + + + Args: + data: Original tensor + + axes: List of non-negative integers, indicate the dimensions to be inserted + """ + + schema = get_schema("Unsqueeze", 1, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Unsqueeze", schema) + return op(*self._prepare_inputs(schema, data), axes=axes) + + def Upsample( + self, + X: Union[BOOL, DOUBLE, FLOAT, FLOAT16, INT32, INT64], + height_scale: Optional[float] = None, + mode: str = "nearest", + width_scale: Optional[float] = None, + ) -> Union[BOOL, DOUBLE, FLOAT, FLOAT16, INT32, INT64]: + r"""[🌐 Upsample(1)](https://onnx.ai/onnx/operators/onnx__Upsample.html#upsample-1 "Online Documentation") + + + Upsample the input tensor. + The width and height of the output tensor are: + output_width = floor(input_width * width_scale), + output_height = floor(input_height * height_scale). + Example: + Given `data` tensor, width_scale, height_scale, mode, + Upsample the input 4-D tensor in nearest mode: + data = [[[ + [1, 2], + [3, 4] + ]]] + width_scale = 2 + height_scale = 2 + mode = "nearest" + output = [[[ + [1, 1, 2, 2], + [1, 1, 2, 2], + [3, 3, 4, 4], + [3, 3, 4, 4] + ]]] + + + Args: + X: 4-D tensor, [N,C,H,W] + + height_scale: The scale along height dimension. It takes value greater than + or equal to 1. + + mode: Two interpolation modes: nearest(default), bilinear + + width_scale: The scale along width dimension. It takes value greater than or + equal to 1. + """ + + schema = get_schema("Upsample", 1, "") + op: Callable[..., Union[BOOL, DOUBLE, FLOAT, FLOAT16, INT32, INT64]] = Op( + self, "Upsample", schema + ) + return op( + *self._prepare_inputs(schema, X), + height_scale=height_scale, + mode=mode, + width_scale=width_scale, + ) + + def Xor(self, A: BOOL, B: BOOL, axis: Optional[int] = None, broadcast: int = 0) -> BOOL: + r"""[🌐 Xor(1)](https://onnx.ai/onnx/operators/onnx__Xor.html#xor-1 "Online Documentation") + + + Returns the tensor resulted from performing the `xor` logical operation + elementwise on the input tensors `A` and `B`. + + If broadcasting is enabled, the right-hand-side argument will be broadcasted + to match the shape of left-hand-side argument. 
See the doc of `Add` for a + detailed description of the broadcasting rules. + + + Args: + A: Left input tensor for the logical operator. + + B: Right input tensor for the logical operator. + + axis: If set, defines the broadcast dimensions. + + broadcast: Enable broadcasting + """ + + schema = get_schema("Xor", 1, "") + op: Callable[..., BOOL] = Op(self, "Xor", schema) + return op(*self._prepare_inputs(schema, A, B), axis=axis, broadcast=broadcast) diff --git a/onnxscript/onnx_opset/_impl/opset10.py b/onnxscript/onnx_opset/_impl/opset10.py new file mode 100644 index 0000000000..4c70fd9177 --- /dev/null +++ b/onnxscript/onnx_opset/_impl/opset10.py @@ -0,0 +1,1362 @@ +# -------------------------------------------------------------------------- +# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ +# ⚙️ Generated by 'python -m opgen' +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +# flake8: noqa +# mypy: disable-error-code=override +# pylint: disable=W0221,W0222,W0237,W0246,R0901 +# -------------------------------------------------------------------------- + +from typing import Callable, Optional, Sequence, Tuple, Union + +from onnx.defs import get_schema + +from onnxscript.onnx_opset._impl.opset9 import Opset9 +from onnxscript.onnx_types import ( + BOOL, + COMPLEX64, + COMPLEX128, + DOUBLE, + FLOAT, + FLOAT16, + INT8, + INT16, + INT32, + INT64, + STRING, + UINT8, + UINT16, + UINT32, + UINT64, +) +from onnxscript.values import Op, Opset + + +class Opset10(Opset9): + def __new__(cls): + return Opset.__new__(cls, "", 10) + + def __init__(self): + super().__init__() + + def AveragePool( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + auto_pad: str = "NOTSET", + ceil_mode: int = 0, + count_include_pad: int = 0, + kernel_shape: Optional[Sequence[int]] = None, + pads: Optional[Sequence[int]] = None, + strides: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 AveragePool(10)](https://onnx.ai/onnx/operators/onnx__AveragePool.html#averagepool-10 "Online Documentation") + + + AveragePool consumes an input tensor X and applies average pooling across + the tensor according to kernel sizes, stride sizes, and pad lengths. + average pooling consisting of computing the average on all values of a + subset of the input tensor according to the kernel size and downsampling the + data into the output tensor Y for further processing. The output spatial shape will be following: + ``` + output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1) + ``` + or + ``` + output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1) + ``` + if ceil_mode is enabled + + ``` + * pad_shape[i] is sum of pads along axis i + ``` + + `auto_pad` is a DEPRECATED attribute. 
If you are using them currently, the output spatial shape will be following: + ``` + VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i]) + SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i]) + ``` + And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`: + ``` + pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i] + ``` + The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero). + + + Args: + X: Input data tensor from the previous operator; dimensions for image case + are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. For non + image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), + where N is the batch size. Optionally, if dimension denotation is in + effect, the operation expects the input data tensor to arrive with the + dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, + DATA_FEATURE ...]. + + auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. + Where default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial + size match the input.In case of odd number add the extra padding at the + end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no + padding. + + ceil_mode: Whether to use ceil or floor (default) to compute the output + shape. + + count_include_pad: Whether include pad pixels when calculating values for + the edges. Default is 0, doesn't count include pad. + + kernel_shape: The size of the kernel along each axis. + + pads: Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. `pads` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis `i` and xi_end, the number of pixels + added at the end of axis `i`. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. + + strides: Stride along each spatial axis. + """ + + schema = get_schema("AveragePool", 10, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "AveragePool", schema) + return op( + *self._prepare_inputs(schema, X), + auto_pad=auto_pad, + ceil_mode=ceil_mode, + count_include_pad=count_include_pad, + kernel_shape=kernel_shape, + pads=pads, + strides=strides, + ) + + def ConvInteger( + self, + x: Union[INT8, UINT8], + w: Union[INT8, UINT8], + x_zero_point: Optional[Union[INT8, UINT8]] = None, + w_zero_point: Optional[Union[INT8, UINT8]] = None, + auto_pad: str = "NOTSET", + dilations: Optional[Sequence[int]] = None, + group: int = 1, + kernel_shape: Optional[Sequence[int]] = None, + pads: Optional[Sequence[int]] = None, + strides: Optional[Sequence[int]] = None, + ) -> INT32: + r"""[🌐 ConvInteger(10)](https://onnx.ai/onnx/operators/onnx__ConvInteger.html#convinteger-10 "Online Documentation") + + + The integer convolution operator consumes an input tensor, its zero-point, a filter, and its zero-point, + and computes the output. The production MUST never overflow. 
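The output-shape and SAME-pad formulas quoted in the AveragePool(10) docstring above check out numerically; a minimal arithmetic sketch (helper name invented, no pooling performed):

```python
import math

def pool_out_dim(in_dim, kernel, stride, pad_sum, ceil_mode=0):
    """output_spatial_shape[i] from the AveragePool(10) docstring."""
    f = math.ceil if ceil_mode else math.floor
    return f((in_dim + pad_sum - kernel) / stride + 1)

# 32-wide input, 3-wide kernel, stride 2, no padding:
assert pool_out_dim(32, 3, 2, 0) == 15               # floor mode
assert pool_out_dim(32, 3, 2, 0, ceil_mode=1) == 16  # ceil mode

# SAME_UPPER: out = ceil(in / stride), and the total pad follows:
out = math.ceil(32 / 2)               # 16
pad_sum = (out - 1) * 2 + 3 - 32      # 1 extra pixel of padding
assert pad_sum == 1
```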
The accumulation may overflow if and only if in 32 bits.
+
+
+ Args:
+ x: Input data tensor from previous layer; has size (N x C x H x W), where N
+ is the batch size, C is the number of channels, and H and W are the
+ height and width. Note that this is for the 2D image. Otherwise the size
+ is (N x C x D1 x D2 ... x Dn). Optionally, if dimension denotation is in
+ effect, the operation expects the input data tensor to arrive with the
+ dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE,
+ DATA_FEATURE ...].
+
+ w: The weight tensor that will be used in the convolutions; has size (M x
+ C/group x kH x kW), where C is the number of channels, and kH and kW are
+ the height and width of the kernel, and M is the number of feature maps.
+ For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x
+ k2 x ... x kn), where (k1 x k2 x ... kn) is the dimension of the kernel.
+ Optionally, if dimension denotation is in effect, the operation expects
+ the weight tensor to arrive with the dimension denotation of
+ [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL
+ ...]. X.shape[1] == (W.shape[1] * group) == C (assuming zero-based
+ indices for the shape array). In other words, FILTER_IN_CHANNEL should
+ be equal to DATA_CHANNEL.
+
+ x_zero_point: (optional) Zero point tensor for input 'x'. It's optional and
+ its default value is 0. It's a scalar, which means a per-tensor/layer
+ quantization.
+
+ w_zero_point: (optional) Zero point tensor for input 'w'. It's optional and
+ its default value is 0. It could be a scalar or a 1-D tensor, which means a
+ per-tensor/layer or per output channel quantization. If it's a 1-D
+ tensor, its number of elements should be equal to the number of output
+ channels (M).
+
+ auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID.
+ The default value is NOTSET, which means explicit padding is used.
+ SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] =
+ ceil(input_shape[i] / strides[i])` for each axis `i`. The padding is
+ split between the two sides equally or almost equally (depending on
+ whether it is even or odd). In case the padding is an odd number, the
+ extra padding is added at the end for SAME_UPPER and at the beginning
+ for SAME_LOWER.
+
+ dilations: Dilation value along each spatial axis of the filter. If not
+ present, the dilation defaults to 1 along each axis.
+
+ group: Number of groups that input channels and output channels are divided
+ into. The default is 1.
+
+ kernel_shape: The shape of the convolution kernel. If not present, it should
+ be inferred from input 'w'.
+
+ pads: Padding for the beginning and ending along each spatial axis; it can
+ take any value greater than or equal to 0. The value represents the number
+ of pixels added to the beginning and end part of the corresponding
+ axis. The `pads` format should be as follows: [x1_begin, x2_begin...x1_end,
+ x2_end,...], where xi_begin is the number of pixels added at the beginning
+ of axis `i` and xi_end the number of pixels added at the end of axis
+ `i`. This attribute cannot be used simultaneously with the auto_pad
+ attribute. If not present, the padding defaults to 0 along the start and end
+ of each spatial axis.
+
+ strides: Stride along each spatial axis. If not present, the stride defaults
+ to 1 along each axis.
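ConvInteger's zero-point handling reduces, for a 1x1 kernel, to an integer matmul per pixel; a hedged numpy sketch of just that special case, with all names illustrative:

```python
import numpy as np

x = np.random.randint(0, 256, (1, 3, 4, 4), dtype=np.uint8)
w = np.random.randint(0, 256, (2, 3, 1, 1), dtype=np.uint8)
x_zp, w_zp = 128, 128

# Shift by the zero points first, then accumulate in int32; the docstring's
# "production MUST never overflow" is about exactly this widening.
xs = x.astype(np.int32) - x_zp
ws = w.astype(np.int32) - w_zp
y = np.einsum("nchw,mc->nmhw", xs, ws.reshape(2, 3))  # 1x1 "convolution"
assert y.dtype == np.int32 and y.shape == (1, 2, 4, 4)
```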
+ """ + + schema = get_schema("ConvInteger", 10, "") + op: Callable[..., INT32] = Op(self, "ConvInteger", schema) + return op( + *self._prepare_inputs(schema, x, w, x_zero_point, w_zero_point), + auto_pad=auto_pad, + dilations=dilations, + group=group, + kernel_shape=kernel_shape, + pads=pads, + strides=strides, + ) + + def DequantizeLinear( + self, + x: Union[INT32, INT8, UINT8], + x_scale: FLOAT, + x_zero_point: Optional[Union[INT32, INT8, UINT8]] = None, + ) -> FLOAT: + r"""[🌐 DequantizeLinear(10)](https://onnx.ai/onnx/operators/onnx__DequantizeLinear.html#dequantizelinear-10 "Online Documentation") + + + The linear dequantization operator. It consumes a quantized tensor, a scale, a zero point to compute the full precision tensor. + The dequantization formula is y = (x - x_zero_point) * x_scale. 'x_scale' and 'x_zero_point' are both scalars. + 'x_zero_point' and 'x' must have same type. 'x' and 'y' must have same shape. In the case of dequantizing int32, + there's no zero point (zero point is supposed to be 0). + + + Args: + x: N-D quantized input tensor to be de-quantized. + + x_scale: Scale for input 'x'. It's a scalar, which means a per-tensor/layer + quantization. + + x_zero_point: (optional) Zero point for input 'x'. It's a scalar, which + means a per-tensor/layer quantization. It's optional. 0 is the default + value when it's not specified. + """ + + schema = get_schema("DequantizeLinear", 10, "") + op: Callable[..., FLOAT] = Op(self, "DequantizeLinear", schema) + return op(*self._prepare_inputs(schema, x, x_scale, x_zero_point)) + + def Dropout( + self, data: Union[DOUBLE, FLOAT, FLOAT16], ratio: float = 0.5 + ) -> Tuple[Union[DOUBLE, FLOAT, FLOAT16], BOOL]: + r"""[🌐 Dropout(10)](https://onnx.ai/onnx/operators/onnx__Dropout.html#dropout-10 "Online Documentation") + + + Dropout takes one input floating tensor and produces two tensor outputs, + output (floating tensor) and mask (`Tensor`). Depending on whether it is + in test mode or not, the output Y will either be a random dropout, or a simple + copy of the input. Note that our implementation of Dropout does scaling in + the training phase, so during testing nothing needs to be done. + This operator has **optional** inputs/outputs. See `ONNX `_ for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. + + + Args: + data: The input data as Tensor. + + ratio: The ratio of random dropout + """ + + schema = get_schema("Dropout", 10, "") + op: Callable[..., Tuple[Union[DOUBLE, FLOAT, FLOAT16], BOOL]] = Op( + self, "Dropout", schema + ) + return op(*self._prepare_inputs(schema, data), ratio=ratio) + + def IsInf( + self, X: Union[DOUBLE, FLOAT], detect_negative: int = 1, detect_positive: int = 1 + ) -> BOOL: + r"""[🌐 IsInf(10)](https://onnx.ai/onnx/operators/onnx__IsInf.html#isinf-10 "Online Documentation") + + Map infinity to true and other values to false. + + Args: + X: (non-differentiable) input + + detect_negative: (Optional) Whether map negative infinity to true. Default + to 1 so that negative infinity induces true. Set this attribute to 0 if + negative infinity should be mapped to false. + + detect_positive: (Optional) Whether map positive infinity to true. Default + to 1 so that positive infinity induces true. Set this attribute to 0 if + positive infinity should be mapped to false. 
+ """ + + schema = get_schema("IsInf", 10, "") + op: Callable[..., BOOL] = Op(self, "IsInf", schema) + return op( + *self._prepare_inputs(schema, X), + detect_negative=detect_negative, + detect_positive=detect_positive, + ) + + def MatMulInteger( + self, + A: Union[INT8, UINT8], + B: Union[INT8, UINT8], + a_zero_point: Optional[Union[INT8, UINT8]] = None, + b_zero_point: Optional[Union[INT8, UINT8]] = None, + ) -> INT32: + r"""[🌐 MatMulInteger(10)](https://onnx.ai/onnx/operators/onnx__MatMulInteger.html#matmulinteger-10 "Online Documentation") + + + Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html. + The production MUST never overflow. The accumulation may overflow if and only if in 32 bits. + + + Args: + A: (non-differentiable) N-dimensional matrix A + + B: (non-differentiable) N-dimensional matrix B + + a_zero_point: (optional, non-differentiable) Zero point tensor for input + 'A'. It's optional and default value is 0. It could be a scalar or N-D + tensor. Scalar refers to per tensor quantization whereas N-D refers to + per row quantization. If the input is 2D of shape [M, K] then zero point + tensor may be an M element vector [zp_1, zp_2, ..., zp_M]. If the input + is N-D tensor with shape [D1, D2, M, K] then zero point tensor may have + shape [D1, D2, M, 1]. + + b_zero_point: (optional, non-differentiable) Zero point tensor for input + 'B'. It's optional and default value is 0. It could be a scalar or a N-D + tensor, Scalar refers to per tensor quantization whereas N-D refers to + per col quantization. If the input is 2D of shape [K, N] then zero point + tensor may be an N element vector [zp_1, zp_2, ..., zp_N]. If the input + is N-D tensor with shape [D1, D2, K, N] then zero point tensor may have + shape [D1, D2, 1, N]. + """ + + schema = get_schema("MatMulInteger", 10, "") + op: Callable[..., INT32] = Op(self, "MatMulInteger", schema) + return op(*self._prepare_inputs(schema, A, B, a_zero_point, b_zero_point)) + + def MaxPool( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + auto_pad: str = "NOTSET", + ceil_mode: int = 0, + dilations: Optional[Sequence[int]] = None, + kernel_shape: Optional[Sequence[int]] = None, + pads: Optional[Sequence[int]] = None, + storage_order: int = 0, + strides: Optional[Sequence[int]] = None, + ) -> Tuple[Union[DOUBLE, FLOAT, FLOAT16], INT64]: + r"""[🌐 MaxPool(10)](https://onnx.ai/onnx/operators/onnx__MaxPool.html#maxpool-10 "Online Documentation") + + + MaxPool consumes an input tensor X and applies max pooling across + the tensor according to kernel sizes, stride sizes, and pad lengths. + max pooling consisting of computing the max on all values of a + subset of the input tensor according to the kernel size and downsampling the + data into the output tensor Y for further processing. The output spatial shape will be following: + ``` + output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1) + ``` + or + ``` + output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1) + ``` + if ceil_mode is enabled + + ``` + * pad_shape[i] is sum of pads along axis i + ``` + + `auto_pad` is a DEPRECATED attribute. 
If you are using them currently, the output spatial shape will be following: + ``` + VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i]) + SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i]) + ``` + And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`: + ``` + pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i] + ``` + The output of each pooling window is maximum number of elements exclude pad. + + + Args: + X: Input data tensor from the previous operator; dimensions for image case + are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. For non + image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), + where N is the batch size. Optionally, if dimension denotation is in + effect, the operation expects the input data tensor to arrive with the + dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, + DATA_FEATURE ...]. + + auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. + Where default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial + size match the input.In case of odd number add the extra padding at the + end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no + padding. + + ceil_mode: Whether to use ceil or floor (default) to compute the output + shape. + + dilations: Dilation value along each spatial axis of filter. + + kernel_shape: The size of the kernel along each axis. + + pads: Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. `pads` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis `i` and xi_end, the number of pixels + added at the end of axis `i`. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. + + storage_order: The storage order of the tensor. 0 is row major, and 1 is + column major. + + strides: Stride along each spatial axis. + """ + + schema = get_schema("MaxPool", 10, "") + op: Callable[..., Tuple[Union[DOUBLE, FLOAT, FLOAT16], INT64]] = Op( + self, "MaxPool", schema + ) + return op( + *self._prepare_inputs(schema, X), + auto_pad=auto_pad, + ceil_mode=ceil_mode, + dilations=dilations, + kernel_shape=kernel_shape, + pads=pads, + storage_order=storage_order, + strides=strides, + ) + + def Mod( + self, + A: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + B: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + fmod: int = 0, + ) -> Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ]: + r"""[🌐 Mod(10)](https://onnx.ai/onnx/operators/onnx__Mod.html#mod-10 "Online Documentation") + + + Performs element-wise binary modulus (with Numpy-style broadcasting support). + The sign of the remainder is the same as that of the Divisor. + + Mod operator can also behave like C fmod() or numpy.fmod. 
In this case, the sign of the remainder however, will be the same as the Dividend + (in contrast to integer mod). To force a behavior like numpy.fmod() an 'fmod' Attribute is provided. + This attribute is set to 0 by default causing the behavior to be like integer mod. + Setting this attribute to 1 causes the remainder to be calculated similar to that of numpy.fmod(). + + If the input type is floating point, then `fmod` attribute must be set to 1. + + In case of dividend being zero, the results will be platform dependent. + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: Dividend tensor + + B: Divisor tensor + + fmod: Whether the operator should behave like fmod (default=0 meaning it + will do integer mods); Set this to 1 to force fmod treatment + """ + + schema = get_schema("Mod", 10, "") + op: Callable[ + ..., + Union[ + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Mod", schema) + return op(*self._prepare_inputs(schema, A, B), fmod=fmod) + + def NonMaxSuppression( + self, + boxes: FLOAT, + scores: FLOAT, + max_output_boxes_per_class: Optional[INT64] = None, + iou_threshold: Optional[FLOAT] = None, + score_threshold: Optional[FLOAT] = None, + center_point_box: int = 0, + ) -> INT64: + r"""[🌐 NonMaxSuppression(10)](https://onnx.ai/onnx/operators/onnx__NonMaxSuppression.html#nonmaxsuppression-10 "Online Documentation") + + + Filter out boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. + Bounding boxes with score less than score_threshold are removed. Bounding box format is indicated by attribute center_point_box. + Note that this algorithm is agnostic to where the origin is in the coordinate system and more generally is invariant to + orthogonal transformations and translations of the coordinate system; thus translating or reflections of the coordinate system + result in the same boxes being selected by the algorithm. + The selected_indices output is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. + The bounding box coordinates corresponding to the selected indices can then be obtained using the Gather or GatherND operation. + + + Args: + boxes: An input tensor with shape [num_batches, spatial_dimension, 4]. The + single box data format is indicated by center_point_box. + + scores: An input tensor with shape [num_batches, num_classes, + spatial_dimension] + + max_output_boxes_per_class: (optional) Integer representing the maximum + number of boxes to be selected per batch per class. It is a scalar. + Default to 0, which means no output. + + iou_threshold: (optional) Float representing the threshold for deciding + whether boxes overlap too much with respect to IOU. It is scalar. Value + range [0, 1]. Default to 0. + + score_threshold: (optional) Float representing the threshold for deciding + when to remove boxes based on score. It is a scalar. + + center_point_box: Integer indicate the format of the box data. The default + is 0. 0 - the box data is supplied as [y1, x1, y2, x2] where (y1, x1) + and (y2, x2) are the coordinates of any diagonal pair of box corners and + the coordinates can be provided as normalized (i.e., lying in the + interval [0, 1]) or absolute. Mostly used for TF models. 1 - the box + data is supplied as [x_center, y_center, width, height]. Mostly used for + Pytorch models. 
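The integer-mod versus fmod split described for Mod(10) above matches numpy's own pair of functions; a quick check:

```python
import numpy as np

a = np.array([-4, 7, 5], dtype=np.int32)
b = np.array([3, -3, 8], dtype=np.int32)

# fmod=0 (default): sign follows the divisor, like Python % and np.mod.
assert (np.mod(a, b) == [2, -2, 5]).all()

# fmod=1: sign follows the dividend, like C fmod() and np.fmod.
assert (np.fmod(a, b) == [-1, 1, 5]).all()
```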
+ """ + + schema = get_schema("NonMaxSuppression", 10, "") + op: Callable[..., INT64] = Op(self, "NonMaxSuppression", schema) + return op( + *self._prepare_inputs( + schema, + boxes, + scores, + max_output_boxes_per_class, + iou_threshold, + score_threshold, + ), + center_point_box=center_point_box, + ) + + def QLinearConv( + self, + x: Union[INT8, UINT8], + x_scale: FLOAT, + x_zero_point: Union[INT8, UINT8], + w: Union[INT8, UINT8], + w_scale: FLOAT, + w_zero_point: Union[INT8, UINT8], + y_scale: FLOAT, + y_zero_point: Union[INT8, UINT8], + B: Optional[INT32] = None, + auto_pad: str = "NOTSET", + dilations: Optional[Sequence[int]] = None, + group: int = 1, + kernel_shape: Optional[Sequence[int]] = None, + pads: Optional[Sequence[int]] = None, + strides: Optional[Sequence[int]] = None, + ) -> Union[INT8, UINT8]: + r"""[🌐 QLinearConv(10)](https://onnx.ai/onnx/operators/onnx__QLinearConv.html#qlinearconv-10 "Online Documentation") + + + The convolution operator consumes a quantized input tensor, its scale and zero point, + a quantized filter, its scale and zero point, and output's scale and zero point, + and computes the quantized output. Each scale and zero-point pair must have same shape. + It means they must be either scalars (per tensor) or 1-D tensors (per output channel). + Each input or output and its related zero point must have same type. + When bias is present it must be quantized using scale = input scale * weight scale and + zero point as 0. + + + Args: + x: Input data tensor from previous layer; has size (N x C x H x W), where N + is the batch size, C is the number of channels, and H and W are the + height and width. Note that this is for the 2D image. Otherwise the size + is (N x C x D1 x D2 ... x Dn). Optionally, if dimension denotation is in + effect, the operation expects input data tensor to arrive with the + dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, + DATA_FEATURE ...]. + + x_scale: Scale tensor for input 'x'. It's a scalar, which means a + per-tensor/layer quantization. + + x_zero_point: Zero point tensor for input 'x'. It's a scalar, which means a + per-tensor/layer quantization. + + w: The weight tensor that will be used in the convolutions; has size (M x + C/group x kH x kW), where C is the number of channels, and kH and kW are + the height and width of the kernel, and M is the number of feature maps. + For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x + k2 x ... x kn), where (k1 x k2 x ... kn) is the dimension of the kernel. + Optionally, if dimension denotation is in effect, the operation expects + the weight tensor to arrive with the dimension denotation of + [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL + ...]. X.shape[1] == (W.shape[1] * group) == C (assuming zero based + indices for the shape array). Or in other words FILTER_IN_CHANNEL should + be equal to DATA_CHANNEL. + + w_scale: Scale tensor for input 'w'. It could be a scalar or a 1-D tensor, + which means a per-tensor/layer or per output channel quantization. If + it's a 1-D tensor, its number of elements should be equal to the number + of output channels (M). + + w_zero_point: Zero point tensor for input 'w'. It could be a scalar or a 1-D + tensor, which means a per-tensor/layer or per output channel + quantization. If it's a 1-D tensor, its number of elements should be + equal to the number of output channels (M). + + y_scale: Scale tensor for output 'y'. It's a scalar, which means a + per-tensor/layer quantization. 
+ + y_zero_point: Zero point tensor for output 'y'. It's a scalar, which means a + per-tensor/layer quantization. + + B: (optional) Optional 1D bias to be added to the convolution, has size of + M. Bias must be quantized using scale = x_scale * w_scale and zero_point + = 0 + + auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. + Where default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = + ceil(input_shape[i] / strides[i])` for each axis `i`. The padding is + split between the two sides equally or almost equally (depending on + whether it is even or odd). In case the padding is an odd number, the + extra padding is added at the end for SAME_UPPER and at the beginning + for SAME_LOWER. + + dilations: dilation value along each spatial axis of the filter. If not + present, the dilation defaults to 1 along each spatial axis. + + group: number of groups input channels and output channels are divided into. + default is 1. + + kernel_shape: The shape of the convolution kernel. If not present, should be + inferred from input 'w'. + + pads: Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0.The value represent the number + of pixels added to the beginning and end part of the corresponding + axis.`pads` format should be as follow [x1_begin, x2_begin...x1_end, + x2_end,...], where xi_begin the number ofpixels added at the beginning + of axis `i` and xi_end, the number of pixels added at the end of axis + `i`.This attribute cannot be used simultaneously with auto_pad + attribute. If not present, the padding defaultsto 0 along start and end + of each spatial axis. + + strides: Stride along each spatial axis. If not present, the stride defaults + to 1 along each spatial axis. + """ + + schema = get_schema("QLinearConv", 10, "") + op: Callable[..., Union[INT8, UINT8]] = Op(self, "QLinearConv", schema) + return op( + *self._prepare_inputs( + schema, + x, + x_scale, + x_zero_point, + w, + w_scale, + w_zero_point, + y_scale, + y_zero_point, + B, + ), + auto_pad=auto_pad, + dilations=dilations, + group=group, + kernel_shape=kernel_shape, + pads=pads, + strides=strides, + ) + + def QLinearMatMul( + self, + a: Union[INT8, UINT8], + a_scale: FLOAT, + a_zero_point: Union[INT8, UINT8], + b: Union[INT8, UINT8], + b_scale: FLOAT, + b_zero_point: Union[INT8, UINT8], + y_scale: FLOAT, + y_zero_point: Union[INT8, UINT8], + ) -> Union[INT8, UINT8]: + r"""[🌐 QLinearMatMul(10)](https://onnx.ai/onnx/operators/onnx__QLinearMatMul.html#qlinearmatmul-10 "Online Documentation") + + + Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html. + It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, + and computes the quantized output. The quantization formula is y = saturate((x / y_scale) + y_zero_point). + For (x / y_scale), it is rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. + Scale and zero point must have same shape. They must be either scalar (per tensor) or N-D tensor + (per row for 'a' and per column for 'b'). Scalar refers to per tensor quantization whereas N-D refers to per row + or per column quantization. 
If the input is 2D of shape [M, K] then zero point and scale tensor may be + an M element vector [v_1, v_2, ..., v_M] for per row quantization and K element vector of shape [v_1, v_2, ..., v_K] + for per column quantization. If the input is N-D tensor with shape [D1, D2, M, K] then zero point and scale tensor may + have shape [D1, D2, M, 1] for per row quantization and shape [D1, D2, 1, K] for per column quantization. + Production must never overflow, and accumulation may overflow if and only if in 32 bits. + + + Args: + a: (non-differentiable) N-dimensional quantized matrix a + + a_scale: (non-differentiable) scale of quantized input a + + a_zero_point: (non-differentiable) zero point of quantized input a + + b: (non-differentiable) N-dimensional quantized matrix b + + b_scale: (non-differentiable) scale of quantized input b + + b_zero_point: (non-differentiable) zero point of quantized input b + + y_scale: (non-differentiable) scale of quantized output y + + y_zero_point: (non-differentiable) zero point of quantized output y + """ + + schema = get_schema("QLinearMatMul", 10, "") + op: Callable[..., Union[INT8, UINT8]] = Op(self, "QLinearMatMul", schema) + return op( + *self._prepare_inputs( + schema, + a, + a_scale, + a_zero_point, + b, + b_scale, + b_zero_point, + y_scale, + y_zero_point, + ) + ) + + def QuantizeLinear( + self, + x: Union[FLOAT, INT32], + y_scale: FLOAT, + y_zero_point: Optional[Union[INT8, UINT8]] = None, + ) -> Union[INT8, UINT8]: + r"""[🌐 QuantizeLinear(10)](https://onnx.ai/onnx/operators/onnx__QuantizeLinear.html#quantizelinear-10 "Online Documentation") + + + The linear per-tensor/layer quantization operator. It consumes a high precision tensor, a scale, a zero point to compute the low precision / quantized tensor. + The quantization formula is y = saturate ((x / y_scale) + y_zero_point). For saturation, it saturates to [0, 255] if it's uint8, or [-128, 127] if it's int8. + For (x / y_scale), it's rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. 'y_zero_point' and 'y' must have same type. + + + Args: + x: N-D full precision Input tensor to be quantized. + + y_scale: Scale for doing quantization to get 'y'. It's a scalar, which means + a per-tensor/layer quantization. + + y_zero_point: (optional) Zero point for doing quantization to get 'y'. It's + a scalar, which means a per-tensor/layer quantization. Default value is + uint8 typed 0 if it's not specified. + """ + + schema = get_schema("QuantizeLinear", 10, "") + op: Callable[..., Union[INT8, UINT8]] = Op(self, "QuantizeLinear", schema) + return op(*self._prepare_inputs(schema, x, y_scale, y_zero_point)) + + def Resize( + self, + X: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + scales: FLOAT, + mode: str = "nearest", + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Resize(10)](https://onnx.ai/onnx/operators/onnx__Resize.html#resize-10 "Online Documentation") + + + Resize the input tensor. + Each dimension value of the output tensor is: + output_dimension = floor(input_dimension * scale). + + + Args: + X: N-D tensor + + scales: The scale array along each dimension. It takes value greater than 0. + If it's less than 1, it's sampling down, otherwise, it's upsampling. 
The + number of elements of 'scales' should be the same as the rank of input + 'X'. + + mode: Two interpolation modes: nearest (default), and linear (including + bilinear, trilinear, etc) + """ + + schema = get_schema("Resize", 10, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Resize", schema) + return op(*self._prepare_inputs(schema, X, scales), mode=mode) + + def ReverseSequence( + self, + input: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + sequence_lens: INT64, + batch_axis: int = 1, + time_axis: int = 0, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 ReverseSequence(10)](https://onnx.ai/onnx/operators/onnx__ReverseSequence.html#reversesequence-10 "Online Documentation") + + + Reverse batch of sequences having different lengths specified by `sequence_lens`. + + For each slice i iterating on batch axis, the operator reverses the first sequence_lens[i] elements on time axis, + and copies elements whose index's beyond sequence_lens[i] to the output. So the output slice i contains reversed + sequences on the first sequence_lens[i] elements, then have original values copied for the other elements. + + Example 1: + input = [[0.0, 4.0, 8.0, 12.0], + [1.0, 5.0, 9.0, 13.0], + [2.0, 6.0, 10.0, 14.0], + [3.0, 7.0, 11.0, 15.0]] + sequence_lens = [4, 3, 2, 1] + time_axis = 0 + batch_axis = 1 + + output = [[3.0, 6.0, 9.0, 12.0], + [2.0, 5.0, 8.0, 13.0], + [1.0, 4.0, 10.0, 14.0], + [0.0, 7.0, 11.0, 15.0]] + + Example 2: + input = [[0.0, 1.0, 2.0, 3.0 ], + [4.0, 5.0, 6.0, 7.0 ], + [8.0, 9.0, 10.0, 11.0], + [12.0, 13.0, 14.0, 15.0]] + sequence_lens = [1, 2, 3, 4] + time_axis = 1 + batch_axis = 0 + + output = [[0.0, 1.0, 2.0, 3.0 ], + [5.0, 4.0, 6.0, 7.0 ], + [10.0, 9.0, 8.0, 11.0], + [15.0, 14.0, 13.0, 12.0]] + + + Args: + input: Tensor of rank r >= 2. + + sequence_lens: Tensor specifying lengths of the sequences in a batch. It has + shape `[batch_size]`. + + batch_axis: (Optional) Specify which axis is batch axis. Must be one of 1 + (default), or 0. + + time_axis: (Optional) Specify which axis is time axis. Must be one of 0 + (default), or 1. + """ + + schema = get_schema("ReverseSequence", 10, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "ReverseSequence", schema) + return op( + *self._prepare_inputs(schema, input, sequence_lens), + batch_axis=batch_axis, + time_axis=time_axis, + ) + + def RoiAlign( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + rois: Union[DOUBLE, FLOAT, FLOAT16], + batch_indices: INT64, + mode: str = "avg", + output_height: int = 1, + output_width: int = 1, + sampling_ratio: int = 0, + spatial_scale: float = 1.0, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 RoiAlign(10)](https://onnx.ai/onnx/operators/onnx__RoiAlign.html#roialign-10 "Online Documentation") + + + Region of Interest (RoI) align operation described in the + [Mask R-CNN paper](https://arxiv.org/abs/1703.06870). 
+ RoiAlign consumes an input tensor X and region of interests (rois) + to apply pooling across each RoI; it produces a 4-D tensor of shape + (num_rois, C, output_height, output_width). + + RoiAlign is proposed to avoid the misalignment by removing + quantizations while converting from original image into feature + map and from feature map into RoI feature; in each ROI bin, + the value of the sampled locations are computed directly + through bilinear interpolation. + + + Args: + X: Input data tensor from the previous operator; 4-D feature map of shape + (N, C, H, W), where N is the batch size, C is the number of channels, + and H and W are the height and the width of the data. + + rois: RoIs (Regions of Interest) to pool over; rois is 2-D input of shape + (num_rois, 4) given as [[x1, y1, x2, y2], ...]. The RoIs' coordinates + are in the coordinate system of the input image. Each coordinate set has + a 1:1 correspondence with the 'batch_indices' input. + + batch_indices: 1-D tensor of shape (num_rois,) with each element denoting + the index of the corresponding image in the batch. + + mode: The pooling method. Two modes are supported: 'avg' and 'max'. Default + is 'avg'. + + output_height: default 1; Pooled output Y's height. + + output_width: default 1; Pooled output Y's width. + + sampling_ratio: Number of sampling points in the interpolation grid used to + compute the output value of each pooled output bin. If > 0, then exactly + sampling_ratio x sampling_ratio grid points are used. If == 0, then an + adaptive number of grid points are used (computed as ceil(roi_width / + output_width), and likewise for height). Default is 0. + + spatial_scale: Multiplicative spatial scale factor to translate ROI + coordinates from their input spatial scale to the scale used when + pooling, i.e., spatial scale of the input feature map X relative to the + input image. E.g.; default is 1.0f. + """ + + schema = get_schema("RoiAlign", 10, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "RoiAlign", schema) + return op( + *self._prepare_inputs(schema, X, rois, batch_indices), + mode=mode, + output_height=output_height, + output_width=output_width, + sampling_ratio=sampling_ratio, + spatial_scale=spatial_scale, + ) + + def Slice( + self, + data: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + starts: Union[INT32, INT64], + ends: Union[INT32, INT64], + axes: Optional[Union[INT32, INT64]] = None, + steps: Optional[Union[INT32, INT64]] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Slice(10)](https://onnx.ai/onnx/operators/onnx__Slice.html#slice-10 "Online Documentation") + + + Produces a slice of the input tensor along multiple axes. Similar to numpy: + https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html + Slices uses `starts`, `ends`, `axes` and `steps` inputs to specify the start and end + dimension and step for each axis in the list of axes, it uses this information to + slice the input `data` tensor. If a negative value is passed for any of the + start or end indices, it represent number of elements before the end of that + dimension. If the value passed to start or end is larger than the `n` (the + number of elements in this dimension), it represents `n`. 
For slicing to the
+        end of a dimension with unknown size, it is recommended to pass in `INT_MAX`.
+        If a negative value is passed for step, it represents slicing backward.
+        If `axes` are omitted, they are set to `[0, ..., ndim-1]`.
+        If `steps` are omitted, they are set to `[1, ..., 1]` of length `len(starts)`.
+        Example 1:
+            data = [
+                [1, 2, 3, 4],
+                [5, 6, 7, 8],
+            ]
+            axes = [0, 1]
+            starts = [1, 0]
+            ends = [2, 3]
+            steps = [1, 2]
+            result = [
+                [5, 7],
+            ]
+        Example 2:
+            data = [
+                [1, 2, 3, 4],
+                [5, 6, 7, 8],
+            ]
+            starts = [0, 1]
+            ends = [-1, 1000]
+            result = [
+                [2, 3, 4],
+            ]
+
+
+        Args:
+            data: Tensor of data to extract slices from.
+
+            starts: 1-D tensor of starting indices of corresponding axis in `axes`
+
+            ends: 1-D tensor of ending indices (exclusive) of corresponding axis in
+                `axes`
+
+            axes: (optional) 1-D tensor of axes that `starts` and `ends` apply to.
+
+            steps: (optional) 1-D tensor of slice step of corresponding axis in `axes`.
+                Defaults to 1.
+        """
+
+        schema = get_schema("Slice", 10, "")
+        op: Callable[
+            ...,
+            Union[
+                BOOL,
+                COMPLEX128,
+                COMPLEX64,
+                DOUBLE,
+                FLOAT,
+                FLOAT16,
+                INT16,
+                INT32,
+                INT64,
+                INT8,
+                STRING,
+                UINT16,
+                UINT32,
+                UINT64,
+                UINT8,
+            ],
+        ] = Op(self, "Slice", schema)
+        return op(*self._prepare_inputs(schema, data, starts, ends, axes, steps))
+
+    def StringNormalizer(
+        self,
+        X: STRING,
+        case_change_action: str = "NONE",
+        is_case_sensitive: int = 0,
+        locale: Optional[str] = None,
+        stopwords: Optional[Sequence[str]] = None,
+    ) -> STRING:
+        r"""[🌐 StringNormalizer(10)](https://onnx.ai/onnx/operators/onnx__StringNormalizer.html#stringnormalizer-10 "Online Documentation")
+
+
+        StringNormalization performs string operations for basic cleaning.
+        This operator has only one input (denoted by X) and only one output
+        (denoted by Y). This operator first examines the elements in X,
+        and removes elements specified in the "stopwords" attribute.
+        After removing stop words, the intermediate result can be further lowercased,
+        uppercased, or just returned, depending on the "case_change_action" attribute.
+        This operator only accepts [C]- and [1, C]-tensors.
+        If all elements in X are dropped, the output will be the empty value of a string tensor with shape [1]
+        if the input shape is [C], and shape [1, 1] if the input shape is [1, C].
+
+
+        Args:
+            X: UTF-8 strings to normalize
+
+            case_change_action: string enum that causes the output to be
+                lowercased/uppercased/unchanged. Valid values are "LOWER", "UPPER",
+                "NONE". Default is "NONE"
+
+            is_case_sensitive: Boolean. Whether the identification of stop words in X is
+                case-sensitive. Default is false
+
+            locale: Environment dependent string that denotes the locale according to
+                which output strings need to be upper/lowercased. Default is en_US or a
+                platform specific equivalent as decided by the implementation.
+
+            stopwords: List of stop words. If not set, no word would be removed from X.
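+
+        Illustrative example (editorial addition, not text from the generated
+        schema; values follow the ONNX reference semantics, with the default
+        case-insensitive stop-word matching):
+        ::
+
+            X = ["monkey", "dog", "cat"]
+            stopwords = ["dog"], case_change_action = "UPPER"
+            Y = ["MONKEY", "CAT"]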
+ """ + + schema = get_schema("StringNormalizer", 10, "") + op: Callable[..., STRING] = Op(self, "StringNormalizer", schema) + return op( + *self._prepare_inputs(schema, X), + case_change_action=case_change_action, + is_case_sensitive=is_case_sensitive, + locale=locale, + stopwords=stopwords, + ) + + def ThresholdedRelu( + self, X: Union[DOUBLE, FLOAT, FLOAT16], alpha: float = 1.0 + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 ThresholdedRelu(10)](https://onnx.ai/onnx/operators/onnx__ThresholdedRelu.html#thresholdedrelu-10 "Online Documentation") + + + ThresholdedRelu takes one input data (Tensor) and produces one output data + (Tensor) where the rectified linear function, y = x for x > alpha, y = 0 otherwise, + is applied to the tensor elementwise. + + + Args: + X: (differentiable) Input tensor + + alpha: Threshold value + """ + + schema = get_schema("ThresholdedRelu", 10, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "ThresholdedRelu", schema) + return op(*self._prepare_inputs(schema, X), alpha=alpha) + + def TopK( + self, X: Union[DOUBLE, FLOAT, FLOAT16], K: INT64, axis: int = -1 + ) -> Tuple[Union[DOUBLE, FLOAT, FLOAT16], INT64]: + r"""[🌐 TopK(10)](https://onnx.ai/onnx/operators/onnx__TopK.html#topk-10 "Online Documentation") + + + Retrieve the top-K elements along a specified axis. Given an input tensor of + shape [a_1, a_2, ..., a_n, r] and integer argument k, return two outputs: + -Value tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] + which contains the values of the top k elements along the specified axis + -Index tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] which + contains the indices of the top k elements (original indices from the input + tensor). + + Given two equivalent values, this operator uses the indices along the axis as + a tiebreaker. That is, the element with the lower index will appear first. + + + Args: + X: Tensor of shape [a_1, a_2, ..., a_n, r] + + K: A 1-D tensor containing a single positive value corresponding to the + number of top elements to retrieve + + axis: Dimension on which to do the sort. + """ + + schema = get_schema("TopK", 10, "") + op: Callable[..., Tuple[Union[DOUBLE, FLOAT, FLOAT16], INT64]] = Op( + self, "TopK", schema + ) + return op(*self._prepare_inputs(schema, X, K), axis=axis) + + def Upsample( + self, + X: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + scales: FLOAT, + mode: str = "nearest", + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Upsample(10)](https://onnx.ai/onnx/operators/onnx__Upsample.html#upsample-10 "Online Documentation") + + + Upsample the input tensor. + Each dimension value of the output tensor is: + output_dimension = floor(input_dimension * scale). + + + Args: + X: N-D tensor + + scales: The scale array along each dimension. It takes value greater than or + equal to 1. The number of elements of 'scales' should be the same as the + rank of input 'X'. 
+ + mode: Two interpolation modes: nearest (default), and linear (including + bilinear, trilinear, etc) + """ + + schema = get_schema("Upsample", 10, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Upsample", schema) + return op(*self._prepare_inputs(schema, X, scales), mode=mode) diff --git a/onnxscript/onnx_opset/_impl/opset11.py b/onnxscript/onnx_opset/_impl/opset11.py new file mode 100644 index 0000000000..b4f76248d3 --- /dev/null +++ b/onnxscript/onnx_opset/_impl/opset11.py @@ -0,0 +1,4909 @@ +# -------------------------------------------------------------------------- +# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ +# ⚙️ Generated by 'python -m opgen' +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +# flake8: noqa +# mypy: disable-error-code=override +# pylint: disable=W0221,W0222,W0237,W0246,R0901 +# -------------------------------------------------------------------------- + +from typing import Callable, Optional, Sequence, Tuple, Union + +from onnx import GraphProto, SparseTensorProto, TensorProto +from onnx.defs import get_schema + +from onnxscript.onnx_opset._impl.opset10 import Opset10 +from onnxscript.onnx_types import ( + BOOL, + COMPLEX64, + COMPLEX128, + DOUBLE, + FLOAT, + FLOAT16, + INT8, + INT16, + INT32, + INT64, + STRING, + UINT8, + UINT16, + UINT32, + UINT64, +) +from onnxscript.values import Op, Opset + + +class Opset11(Opset10): + def __new__(cls): + return Opset.__new__(cls, "", 11) + + def __init__(self): + super().__init__() + + def ArgMax( + self, + data: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + axis: int = 0, + keepdims: int = 1, + ) -> INT64: + r"""[🌐 ArgMax(11)](https://onnx.ai/onnx/operators/onnx__ArgMax.html#argmax-11 "Online Documentation") + + + Computes the indices of the max elements of the input tensor's element along the + provided axis. The resulting tensor has the same rank as the input if keepdims equals 1. + If keepdims equal 0, then the resulting tensor has the reduced dimension pruned. + The type of the output tensor is integer. + + Args: + data: An input tensor. + + axis: The axis in which to compute the arg indices. Accepted range is [-r, + r-1] where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + """ + + schema = get_schema("ArgMax", 11, "") + op: Callable[..., INT64] = Op(self, "ArgMax", schema) + return op(*self._prepare_inputs(schema, data), axis=axis, keepdims=keepdims) + + def ArgMin( + self, + data: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + axis: int = 0, + keepdims: int = 1, + ) -> INT64: + r"""[🌐 ArgMin(11)](https://onnx.ai/onnx/operators/onnx__ArgMin.html#argmin-11 "Online Documentation") + + + Computes the indices of the min elements of the input tensor's element along the + provided axis. The resulting tensor has the same rank as the input if keepdims equals 1. + If keepdims equal 0, then the resulting tensor has the reduced dimension pruned. + The type of the output tensor is integer. + + Args: + data: An input tensor. + + axis: The axis in which to compute the arg indices. 
Accepted range is [-r, + r-1] where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + """ + + schema = get_schema("ArgMin", 11, "") + op: Callable[..., INT64] = Op(self, "ArgMin", schema) + return op(*self._prepare_inputs(schema, data), axis=axis, keepdims=keepdims) + + def AveragePool( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + auto_pad: str = "NOTSET", + ceil_mode: int = 0, + count_include_pad: int = 0, + kernel_shape: Optional[Sequence[int]] = None, + pads: Optional[Sequence[int]] = None, + strides: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 AveragePool(11)](https://onnx.ai/onnx/operators/onnx__AveragePool.html#averagepool-11 "Online Documentation") + + + AveragePool consumes an input tensor X and applies average pooling across + the tensor according to kernel sizes, stride sizes, and pad lengths. + average pooling consisting of computing the average on all values of a + subset of the input tensor according to the kernel size and downsampling the + data into the output tensor Y for further processing. The output spatial shape will be following: + ``` + output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1) + ``` + or + ``` + output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1) + ``` + if ceil_mode is enabled + + ``` + * pad_shape[i] is sum of pads along axis i + ``` + + `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following: + ``` + VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i]) + SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i]) + ``` + And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`: + ``` + pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i] + ``` + The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero). + + + Args: + X: (differentiable) Input data tensor from the previous operator; dimensions + for image case are (N x C x H x W), where N is the batch size, C is the + number of channels, and H and W are the height and the width of the + data. For non image case, the dimensions are in the form of (N x C x D1 + x D2 ... Dn), where N is the batch size. Optionally, if dimension + denotation is in effect, the operation expects the input data tensor to + arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, + DATA_FEATURE, DATA_FEATURE ...]. + + auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. + Where default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = + ceil(input_shape[i] / strides[i])` for each axis `i`. The padding is + split between the two sides equally or almost equally (depending on + whether it is even or odd). In case the padding is an odd number, the + extra padding is added at the end for SAME_UPPER and at the beginning + for SAME_LOWER. + + ceil_mode: Whether to use ceil or floor (default) to compute the output + shape. + + count_include_pad: Whether include pad pixels when calculating values for + the edges. Default is 0, doesn't count include pad. 
+ + kernel_shape: The size of the kernel along each axis. + + pads: Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. `pads` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis `i` and xi_end, the number of pixels + added at the end of axis `i`. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. + + strides: Stride along each spatial axis. If not present, the stride defaults + to 1 along each spatial axis. + """ + + schema = get_schema("AveragePool", 11, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "AveragePool", schema) + return op( + *self._prepare_inputs(schema, X), + auto_pad=auto_pad, + ceil_mode=ceil_mode, + count_include_pad=count_include_pad, + kernel_shape=kernel_shape, + pads=pads, + strides=strides, + ) + + def BitShift( + self, + X: Union[UINT16, UINT32, UINT64, UINT8], + Y: Union[UINT16, UINT32, UINT64, UINT8], + direction: Optional[str] = None, + ) -> Union[UINT16, UINT32, UINT64, UINT8]: + r"""[🌐 BitShift(11)](https://onnx.ai/onnx/operators/onnx__BitShift.html#bitshift-11 "Online Documentation") + + + Bitwise shift operator performs element-wise operation. For each input element, if the + attribute "direction" is "RIGHT", this operator moves its binary representation toward + the right side so that the input value is effectively decreased. If the attribute "direction" + is "LEFT", bits of binary representation moves toward the left side, which results the + increase of its actual value. The input X is the tensor to be shifted and another input + Y specifies the amounts of shifting. For example, if "direction" is "Right", X is [1, 4], + and S is [1, 1], the corresponding output Z would be [0, 2]. If "direction" is "LEFT" with + X=[1, 2] and S=[1, 2], the corresponding output Y would be [2, 8]. + + Because this operator supports Numpy-style broadcasting, X's and Y's shapes are + not necessarily identical. + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + Args: + X: (non-differentiable) First operand, input to be shifted. + + Y: (non-differentiable) Second operand, amounts of shift. + + direction: Direction of moving bits. It can be either "RIGHT" (for right + shift) or "LEFT" (for left shift). + """ + + schema = get_schema("BitShift", 11, "") + op: Callable[..., Union[UINT16, UINT32, UINT64, UINT8]] = Op(self, "BitShift", schema) + return op(*self._prepare_inputs(schema, X, Y), direction=direction) + + def Clip( + self, + input: Union[DOUBLE, FLOAT, FLOAT16], + min: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + max: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Clip(11)](https://onnx.ai/onnx/operators/onnx__Clip.html#clip-11 "Online Documentation") + + + Clip operator limits the given input within an interval. The interval is + specified by the inputs 'min' and 'max'. They default to + numeric_limits::lowest() and numeric_limits::max(), respectively. + + + Args: + input: Input tensor whose elements to be clipped + + min: (optional) Minimum value, under which element is replaced by min. It + must be a scalar(tensor of empty shape). 
+ + max: (optional) Maximum value, above which element is replaced by max. It + must be a scalar(tensor of empty shape). + """ + + schema = get_schema("Clip", 11, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Clip", schema) + return op(*self._prepare_inputs(schema, input, min, max)) + + def Compress( + self, + input: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + condition: BOOL, + axis: Optional[int] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Compress(11)](https://onnx.ai/onnx/operators/onnx__Compress.html#compress-11 "Online Documentation") + + + Selects slices from an input tensor along a given axis where condition evaluates to True for each axis index. + In case axis is not provided, input is flattened before elements are selected. + Compress behaves like numpy.compress: https://docs.scipy.org/doc/numpy/reference/generated/numpy.compress.html + + + Args: + input: (differentiable) Tensor of rank r >= 1. + + condition: (non-differentiable) Rank 1 tensor of booleans to indicate which + slices or data elements to be selected. Its length can be less than the + input length along the axis or the flattened input size if axis is not + specified. In such cases data slices or elements exceeding the condition + length are discarded. + + axis: (Optional) Axis along which to take slices. If not specified, input is + flattened before elements being selected. Negative value means counting + dimensions from the back. Accepted range is [-r, r-1] where r = + rank(input). + """ + + schema = get_schema("Compress", 11, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Compress", schema) + return op(*self._prepare_inputs(schema, input, condition), axis=axis) + + def Concat( + self, + *inputs: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axis: Optional[int] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Concat(11)](https://onnx.ai/onnx/operators/onnx__Concat.html#concat-11 "Online Documentation") + + Concatenate a list of tensors into a single tensor. All input tensors must have the same shape, except for the dimension size of the axis to concatenate on. + + Args: + inputs: (variadic) List of tensors for concatenation + + axis: Which axis to concat on. A negative value means counting dimensions + from the back. Accepted range is [-r, r-1] where r = rank(inputs).. 
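+
+        Illustrative shape example (editorial addition, not text from the
+        generated schema): only the concatenated axis may differ between
+        inputs.
+        ::
+
+            inputs = X with shape [2, 3], Y with shape [2, 4]
+            axis = 1
+            output shape = [2, 3 + 4] = [2, 7]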
+ """ + + schema = get_schema("Concat", 11, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Concat", schema) + return op(*self._prepare_inputs(schema, *inputs), axis=axis) + + def ConcatFromSequence( + self, + input_sequence: Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + ], + axis: Optional[int] = None, + new_axis: int = 0, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 ConcatFromSequence(11)](https://onnx.ai/onnx/operators/onnx__ConcatFromSequence.html#concatfromsequence-11 "Online Documentation") + + + Concatenate a sequence of tensors into a single tensor. + All input tensors must have the same shape, except for the dimension size of the axis to concatenate on. + By default 'new_axis' is 0, the behavior is similar to numpy.concatenate. + When 'new_axis' is 1, the behavior is similar to numpy.stack. + + + Args: + input_sequence: Sequence of tensors for concatenation + + axis: Which axis to concat on. Accepted range in `[-r, r - 1]`, where `r` is + the rank of input tensors. When `new_axis` is 1, accepted range is `[-r + - 1, r]`. + + new_axis: Insert and concatenate on a new axis or not, default 0 means do + not insert new axis. + """ + + schema = get_schema("ConcatFromSequence", 11, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "ConcatFromSequence", schema) + return op(*self._prepare_inputs(schema, input_sequence), axis=axis, new_axis=new_axis) + + def Constant( + self, + sparse_value: Optional[SparseTensorProto] = None, + value: Optional[TensorProto] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Constant(11)](https://onnx.ai/onnx/operators/onnx__Constant.html#constant-11 "Online Documentation") + + + A constant tensor. Exactly one of the two attributes, either value or sparse_value, + must be specified. + + + Args: + sparse_value: The value for the elements of the output tensor in sparse + format. + + value: The value for the elements of the output tensor. 
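+
+        Illustrative example (editorial addition, not text from the generated
+        schema; assumes onnx.helper.make_tensor is used to build the `value`
+        attribute):
+        ::
+
+            value = make_tensor("const", TensorProto.FLOAT, dims=[2], vals=[1.0, 2.0])
+            output = [1.0, 2.0]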
+ """ + + schema = get_schema("Constant", 11, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Constant", schema) + return op(sparse_value=sparse_value, value=value) + + def Conv( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + W: Union[DOUBLE, FLOAT, FLOAT16], + B: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + auto_pad: str = "NOTSET", + dilations: Optional[Sequence[int]] = None, + group: int = 1, + kernel_shape: Optional[Sequence[int]] = None, + pads: Optional[Sequence[int]] = None, + strides: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Conv(11)](https://onnx.ai/onnx/operators/onnx__Conv.html#conv-11 "Online Documentation") + + + The convolution operator consumes an input tensor and a filter, and + computes the output. + + Args: + X: (differentiable) Input data tensor from previous layer; has size (N x C x + H x W), where N is the batch size, C is the number of channels, and H + and W are the height and width. Note that this is for the 2D image. + Otherwise the size is (N x C x D1 x D2 ... x Dn). Optionally, if + dimension denotation is in effect, the operation expects input data + tensor to arrive with the dimension denotation of [DATA_BATCH, + DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...]. + + W: (differentiable) The weight tensor that will be used in the convolutions; + has size (M x C/group x kH x kW), where C is the number of channels, and + kH and kW are the height and width of the kernel, and M is the number of + feature maps. For more than 2 dimensions, the kernel shape will be (M x + C/group x k1 x k2 x ... x kn), where (k1 x k2 x ... kn) is the dimension + of the kernel. Optionally, if dimension denotation is in effect, the + operation expects the weight tensor to arrive with the dimension + denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, + FILTER_SPATIAL ...]. Assuming zero based indices for the shape array, + X.shape[1] == (W.shape[1] * group) == C and W.shape[0] mod G == 0. Or in + other words FILTER_IN_CHANNEL multiplied by the number of groups should + be equal to DATA_CHANNEL and the number of feature maps M should be a + multiple of the number of groups G. + + B: (optional, differentiable) Optional 1D bias to be added to the + convolution, has size of M. + + auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. + Where default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = + ceil(input_shape[i] / strides[i])` for each axis `i`. The padding is + split between the two sides equally or almost equally (depending on + whether it is even or odd). In case the padding is an odd number, the + extra padding is added at the end for SAME_UPPER and at the beginning + for SAME_LOWER. + + dilations: dilation value along each spatial axis of the filter. If not + present, the dilation defaults is 1 along each spatial axis. + + group: number of groups input channels and output channels are divided into. + + kernel_shape: The shape of the convolution kernel. If not present, should be + inferred from input W. + + pads: Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. 
`pads` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis `i` and xi_end, the number of pixels + added at the end of axis `i`. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. + + strides: Stride along each spatial axis. If not present, the stride defaults + is 1 along each spatial axis. + """ + + schema = get_schema("Conv", 11, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Conv", schema) + return op( + *self._prepare_inputs(schema, X, W, B), + auto_pad=auto_pad, + dilations=dilations, + group=group, + kernel_shape=kernel_shape, + pads=pads, + strides=strides, + ) + + def ConvTranspose( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + W: Union[DOUBLE, FLOAT, FLOAT16], + B: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + auto_pad: str = "NOTSET", + dilations: Optional[Sequence[int]] = None, + group: int = 1, + kernel_shape: Optional[Sequence[int]] = None, + output_padding: Optional[Sequence[int]] = None, + output_shape: Optional[Sequence[int]] = None, + pads: Optional[Sequence[int]] = None, + strides: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 ConvTranspose(11)](https://onnx.ai/onnx/operators/onnx__ConvTranspose.html#convtranspose-11 "Online Documentation") + + + The convolution transpose operator consumes an input tensor and a filter, + and computes the output. + + If the pads parameter is provided the shape of the output is calculated via the following equation: + + output_shape[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - pads[start_i] - pads[end_i] + + output_shape can also be explicitly specified in which case pads values are auto generated using these equations: + + total_padding[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - output_shape[i] + If (auto_pads == SAME_UPPER): pads[start_i] = total_padding[i]/2; pads[end_i] = total_padding[i] - (total_padding[i]/2) + Else: pads[start_i] = total_padding[i] - (total_padding[i]/2); pads[end_i] = (total_padding[i]/2). + + + + Args: + X: (differentiable) Input data tensor from previous layer; has size (N x C x + H x W), where N is the batch size, C is the number of channels, and H + and W are the height and width. Note that this is for the 2D image. + Otherwise the size is (N x C x D1 x D2 ... x Dn) + + W: (differentiable) The weight tensor that will be used in the convolutions; + has size (C x M/group x kH x kW), where C is the number of channels, and + kH and kW are the height and width of the kernel, and M is the number of + feature maps. For more than 2 dimensions, the weight shape will be (C x + M/group x k1 x k2 x ... x kn), where (k1 x k2 x ... x kn) is the + dimension of the kernel. The number of channels in the output should be + equal to W.shape[1] * group (assuming zero based indices of the shape + array) + + B: (optional, differentiable) Optional 1D bias to be added to the + convolution, has size of M. + + auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. + Where default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = + input_shape[i] * strides[i]` for each axis `i`. 
The padding is split + between the two sides equally or almost equally (depending on whether it + is even or odd). In case the padding is an odd number, the extra padding + is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. + + dilations: dilation value along each spatial axis of the filter. If not + present, the dilation defaults to 1 along each spatial axis. + + group: number of groups input channels and output channels are divided into. + + kernel_shape: The shape of the convolution kernel. If not present, should be + inferred from input W. + + output_padding: Additional elements added to the side with higher coordinate + indices in the output. Each padding value in "output_padding" must be + less than the corresponding stride/dilation dimension. By default, this + attribute is a zero vector. Note that this attribute doesn't directly + affect the computed output values. It only controls the selection of the + computed values, so changing this attribute only adds or removes output + elements. If "output_shape" is explicitly provided, "output_padding" + does not contribute additional size to "output_shape" but participates + in the computation of the needed padding amount. This is also called + adjs or adjustment in some frameworks. + + output_shape: The shape of the output can be explicitly set which will cause + pads values to be auto generated. If output_shape is specified pads + values are ignored. See doc for details for equations to generate pads + + pads: Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. `pads` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis `i` and xi_end, the number of pixels + added at the end of axis `i`. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. + + strides: Stride along each spatial axis. If not present, the stride defaults + to 1 along each spatial axis. + """ + + schema = get_schema("ConvTranspose", 11, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "ConvTranspose", schema) + return op( + *self._prepare_inputs(schema, X, W, B), + auto_pad=auto_pad, + dilations=dilations, + group=group, + kernel_shape=kernel_shape, + output_padding=output_padding, + output_shape=output_shape, + pads=pads, + strides=strides, + ) + + def CumSum( + self, + x: Union[DOUBLE, FLOAT, INT32, INT64, UINT32, UINT64], + axis: Union[INT32, INT64], + exclusive: int = 0, + reverse: int = 0, + ) -> Union[DOUBLE, FLOAT, INT32, INT64, UINT32, UINT64]: + r"""[🌐 CumSum(11)](https://onnx.ai/onnx/operators/onnx__CumSum.html#cumsum-11 "Online Documentation") + + + Performs cumulative sum of the input elements along the given axis. + By default, it will do the sum inclusively meaning the first element is copied as is. + Through an `exclusive` attribute, this behavior can change to exclude the first element. + It can also perform summation in the opposite direction of the axis. For that, set `reverse` attribute to 1. 
+ + Example: + :: + + input_x = [1, 2, 3] + axis=0 + output = [1, 3, 6] + exclusive=1 + output = [0, 1, 3] + exclusive=0 + reverse=1 + output = [6, 5, 3] + exclusive=1 + reverse=1 + output = [5, 3, 0] + + + + + Args: + x: (differentiable) An input tensor that is to be processed. + + axis: (non-differentiable) A 0-D tensor. Must be in the range [-rank(x), + rank(x)-1]. Negative value means counting dimensions from the back. + + exclusive: If set to 1 will return exclusive sum in which the top element is + not included. In other terms, if set to 1, the j-th output element would + be the sum of the first (j-1) elements. Otherwise, it would be the sum + of the first j elements. + + reverse: If set to 1 will perform the sums in reverse direction. + """ + + schema = get_schema("CumSum", 11, "") + op: Callable[..., Union[DOUBLE, FLOAT, INT32, INT64, UINT32, UINT64]] = Op( + self, "CumSum", schema + ) + return op(*self._prepare_inputs(schema, x, axis), exclusive=exclusive, reverse=reverse) + + def DepthToSpace( + self, + input: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + blocksize: Optional[int] = None, + mode: str = "DCR", + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 DepthToSpace(11)](https://onnx.ai/onnx/operators/onnx__DepthToSpace.html#depthtospace-11 "Online Documentation") + + DepthToSpace rearranges (permutes) data from depth into blocks of spatial data. + This is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of + the input tensor where values from the depth dimension are moved in spatial blocks to the height + and width dimensions. By default, `mode` = `DCR`. + In the DCR mode, elements along the depth dimension from the input tensor are rearranged in the + following order: depth, column, and then row. The output y is computed from the input x as below: + + b, c, h, w = x.shape + + tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w]) + + tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2]) + + y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize]) + + + In the CRD mode, elements along the depth dimension from the input tensor are rearranged in the + following order: column, row, and the depth. The output y is computed from the input x as below: + + b, c, h, w = x.shape + + tmp = np.reshape(x, [b, c // (blocksize ** 2), blocksize, blocksize, h, w]) + + tmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3]) + + y = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize]) + + + + Args: + input: Input tensor of [N,C,H,W], where N is the batch axis, C is the + channel or depth, H is the height and W is the width. + + blocksize: Blocks of [blocksize, blocksize] are moved. + + mode: DCR (default) for depth-column-row order re-arrangement. Use CRD for + column-row-depth order. 
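+
+        Illustrative shape example (editorial addition, not text from the
+        generated schema): C must be divisible by blocksize**2.
+        ::
+
+            input shape  = [1, 8, 2, 3], blocksize = 2
+            output shape = [1, 8 / 2**2, 2 * 2, 3 * 2] = [1, 2, 4, 6]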
+ """ + + schema = get_schema("DepthToSpace", 11, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "DepthToSpace", schema) + return op(*self._prepare_inputs(schema, input), blocksize=blocksize, mode=mode) + + def Det(self, X: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Det(11)](https://onnx.ai/onnx/operators/onnx__Det.html#det-11 "Online Documentation") + + + Det calculates determinant of a square matrix or batches of square matrices. + Det takes one input tensor of shape `[*, M, M]`, where `*` is zero or more batch dimensions, + and the inner-most 2 dimensions form square matrices. + The output is a tensor of shape `[*]`, containing the determinants of all input submatrices. + e.g., When the input is 2-D, the output is a scalar(shape is empty: `[]`). + + + Args: + X: (differentiable) Input tensor + """ + + schema = get_schema("Det", 11, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Det", schema) + return op(*self._prepare_inputs(schema, X)) + + def DynamicQuantizeLinear(self, x: FLOAT) -> Tuple[UINT8, FLOAT, UINT8]: + r"""[🌐 DynamicQuantizeLinear(11)](https://onnx.ai/onnx/operators/onnx__DynamicQuantizeLinear.html#dynamicquantizelinear-11 "Online Documentation") + + + A Function to fuse calculation for Scale, Zero Point and FP32->8Bit convertion of FP32 Input data. + Outputs Scale, ZeroPoint and Quantized Input for a given FP32 Input. + Scale is calculated as: + :: + + y_scale = (max(x) - min(x))/(qmax - qmin) + * where qmax and qmin are max and min values for quantization range .i.e [0, 255] in case of uint8 + * data range is adjusted to include 0. + + + Zero point is calculated as: + :: + + intermediate_zero_point = qmin - min(x)/y_scale + y_zero_point = cast(round(saturate(itermediate_zero_point))) + * where qmax and qmin are max and min values for quantization range .i.e [0, 255] in case of uint8 + * for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported. + * rounding to nearest ties to even. + + + Data quantization formula is: + :: + + y = saturate (round (x / y_scale) + y_zero_point) + * for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported. + * rounding to nearest ties to even. + + + + + Args: + x: Input tensor + """ + + schema = get_schema("DynamicQuantizeLinear", 11, "") + op: Callable[..., Tuple[UINT8, FLOAT, UINT8]] = Op( + self, "DynamicQuantizeLinear", schema + ) + return op(*self._prepare_inputs(schema, x)) + + def Equal( + self, + A: Union[ + BOOL, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + B: Union[ + BOOL, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> BOOL: + r"""[🌐 Equal(11)](https://onnx.ai/onnx/operators/onnx__Equal.html#equal-11 "Online Documentation") + + + Returns the tensor resulted from performing the `equal` logical operation + elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: First input operand for the logical operator. + + B: Second input operand for the logical operator. 
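+
+        Illustrative example (editorial addition, not text from the generated
+        schema):
+        ::
+
+            A = [1, 2, 3]
+            B = [1, 4, 3]
+            output = [True, False, True]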
+ """ + + schema = get_schema("Equal", 11, "") + op: Callable[..., BOOL] = Op(self, "Equal", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def Flatten( + self, + input: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axis: int = 1, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Flatten(11)](https://onnx.ai/onnx/operators/onnx__Flatten.html#flatten-11 "Online Documentation") + + + Flattens the input tensor into a 2D matrix. If input tensor has shape + (d_0, d_1, ... d_n) then the output will have shape + (d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn). + + + Args: + input: A tensor of rank >= axis. + + axis: Indicate up to which input dimensions (exclusive) should be flattened + to the outer dimension of the output. The value for axis must be in the + range [-r, r], where r is the rank of the input tensor. Negative value + means counting dimensions from the back. When axis = 0, the shape of the + output tensor is (1, (d_0 X d_1 ... d_n), where the shape of the input + tensor is (d_0, d_1, ... d_n). + """ + + schema = get_schema("Flatten", 11, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Flatten", schema) + return op(*self._prepare_inputs(schema, input), axis=axis) + + def Gather( + self, + data: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + indices: Union[INT32, INT64], + axis: int = 0, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Gather(11)](https://onnx.ai/onnx/operators/onnx__Gather.html#gather-11 "Online Documentation") + + + Given `data` tensor of rank r >= 1, and `indices` tensor of rank q, gather + entries of the axis dimension of `data` (by default outer-most one as axis=0) indexed by `indices`, and concatenates + them in an output tensor of rank q + (r - 1). + + axis = 0 : + + Let + k = indices[i_{0}, ..., i_{q-1}] + Then + output[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[k , j_{0}, ..., j_{r-2}] + + :: + + data = [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] + indices = [ + [0, 1], + [1, 2], + ] + output = [ + [ + [1.0, 1.2], + [2.3, 3.4], + ], + [ + [2.3, 3.4], + [4.5, 5.7], + ], + ] + + + axis = 1 : + + Let + k = indices[i_{0}, ..., i_{q-1}] + Then + output[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j_{r-2}] + + :: + + data = [ + [1.0, 1.2, 1.9], + [2.3, 3.4, 3.9], + [4.5, 5.7, 5.9], + ] + indices = [ + [0, 2], + ] + axis = 1, + output = [ + [ + [1.0, 1.9], + [2.3, 3.9], + [4.5, 5.9], + ], + ] + + + + + Args: + data: Tensor of rank r >= 1. + + indices: Tensor of int32/int64 indices, of any rank q. All index values are + expected to be within bounds [-s, s-1] along axis of size s. It is an + error if any of the index values are out of bounds. + + axis: Which axis to gather on. Negative value means counting dimensions from + the back. Accepted range is [-r, r-1] where r = rank(data). 
+ """ + + schema = get_schema("Gather", 11, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Gather", schema) + return op(*self._prepare_inputs(schema, data, indices), axis=axis) + + def GatherElements( + self, + data: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + indices: Union[INT32, INT64], + axis: int = 0, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 GatherElements(11)](https://onnx.ai/onnx/operators/onnx__GatherElements.html#gatherelements-11 "Online Documentation") + + + + GatherElements takes two inputs `data` and `indices` of the same rank r >= 1 + and an optional attribute `axis` that identifies an axis of `data` + (by default, the outer-most axis, that is axis 0). It is an indexing operation + that produces its output by indexing into the input data tensor at index + positions determined by elements of the `indices` tensor. + Its output shape is the same as the shape of `indices` and consists of one value + (gathered from the `data`) for each element in `indices`. + + For instance, in the 3-D case (r = 3), the output produced is determined + by the following equations: + :: + + out[i][j][k] = input[index[i][j][k]][j][k] if axis = 0, + out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1, + out[i][j][k] = input[i][j][index[i][j][k]] if axis = 2, + + + + This operator is also the inverse of ScatterElements. It is similar to Torch's gather operation. + + Example 1: + :: + + data = [ + [1, 2], + [3, 4], + ] + indices = [ + [0, 0], + [1, 0], + ] + axis = 1 + output = [ + [ + [1, 1], + [4, 3], + ], + ] + + + Example 2: + :: + + data = [ + [1, 2, 3], + [4, 5, 6], + [7, 8, 9], + ] + indices = [ + [1, 2, 0], + [2, 0, 0], + ] + axis = 0 + output = [ + [ + [4, 8, 3], + [7, 2, 3], + ], + ] + + + + + Args: + data: Tensor of rank r >= 1. + + indices: Tensor of int32/int64 indices, with the same rank r as the input. + All index values are expected to be within bounds [-s, s-1] along axis + of size s. It is an error if any of the index values are out of bounds. + + axis: Which axis to gather on. Negative value means counting dimensions from + the back. Accepted range is [-r, r-1] where r = rank(data). 
+ """ + + schema = get_schema("GatherElements", 11, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "GatherElements", schema) + return op(*self._prepare_inputs(schema, data, indices), axis=axis) + + def GatherND( + self, + data: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + indices: INT64, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 GatherND(11)](https://onnx.ai/onnx/operators/onnx__GatherND.html#gathernd-11 "Online Documentation") + + + Given `data` tensor of rank `r` >= 1, and `indices` tensor of rank `q` >= 1, this operator gathers + slices of `data` into an output tensor of rank `q + r - indices_shape[-1] - 1`. + + `indices` is an q-dimensional integer tensor, best thought of as a `(q-1)`-dimensional tensor of index-tuples into `data`, + where each element defines a slice of `data` + + Some salient points about the inputs' rank and shape: + + 1) r >= 1 and q >= 1 are to be honored. There is no dependency condition to be met between ranks `r` and `q` + + 2) The `indices_shape[-1]` should have a value between 1 (inclusive) and rank `r` (inclusive) + + 3) All values in `indices` are expected to be within bounds [-s, s-1] along axis of size `s` (i.e.) `-data_shape[i] <= indices[...,i] <= data_shape[i] - 1`. + It is an error if any of the index values are out of bounds. + + The output is computed as follows: + + The output tensor is obtained by mapping each index-tuple in the `indices` tensor to the corresponding slice of the input `data`. + + 1) If `indices_shape[-1] > r` => error condition + + 2) If `indices_shape[-1] == r`, since the rank of `indices` is `q`, `indices` can be thought of as a `(q-1)`-dimensional tensor + containing 1-D tensors of dimension `r`. Let us think of each such `r` ranked tensor as `indices_slice`. + Each *scalar value* corresponding to `data[indices_slice]` is filled into the corresponding location of the `(q-1)`-dimensional tensor + to form the `output` tensor (Example 1 below) + + 3) If `indices_shape[-1] < r`, since the rank of `indices` is `q`, `indices` can be thought of as a `(q-1)`-dimensional tensor + containing 1-D tensors of dimension `< r`. Let us think of each such tensors as `indices_slice`. + Each *tensor slice* corresponding to `data[indices_slice , :]` is filled into the corresponding location of the `(q-1)`-dimensional tensor + to form the `output` tensor (Examples 2, 3, and 4 below) + + This operator is the inverse of `ScatterND`. 
+
+        `Example 1`
+
+            data    = [[0,1],[2,3]]   # data_shape    = [2, 2]
+
+            indices = [[0,0],[1,1]]   # indices_shape = [2, 2]
+
+            output  = [0,3]           # output_shape  = [2]
+
+        `Example 2`
+
+            data    = [[0,1],[2,3]]   # data_shape    = [2, 2]
+
+            indices = [[1],[0]]       # indices_shape = [2, 1]
+
+            output  = [[2,3],[0,1]]   # output_shape  = [2, 2]
+
+        `Example 3`
+
+            data    = [[[0,1],[2,3]],[[4,5],[6,7]]]  # data_shape    = [2, 2, 2]
+
+            indices = [[0,1],[1,0]]                  # indices_shape = [2, 2]
+
+            output  = [[2,3],[4,5]]                  # output_shape  = [2, 2]
+
+        `Example 4`
+
+            data    = [[[0,1],[2,3]],[[4,5],[6,7]]]  # data_shape    = [2, 2, 2]
+
+            indices = [[[0,1]],[[1,0]]]              # indices_shape = [2, 1, 2]
+
+            output  = [[[2,3]],[[4,5]]]              # output_shape  = [2, 1, 2]
+
+
+        Args:
+            data: Tensor of rank r >= 1.
+
+            indices: Tensor of rank q >= 1. All index values are expected to be within
+                bounds [-s, s-1] along axis of size s. It is an error if any of the
+                index values are out of bounds.
+        """
+
+        schema = get_schema("GatherND", 11, "")
+        op: Callable[
+            ...,
+            Union[
+                BOOL,
+                COMPLEX128,
+                COMPLEX64,
+                DOUBLE,
+                FLOAT,
+                FLOAT16,
+                INT16,
+                INT32,
+                INT64,
+                INT8,
+                STRING,
+                UINT16,
+                UINT32,
+                UINT64,
+                UINT8,
+            ],
+        ] = Op(self, "GatherND", schema)
+        return op(*self._prepare_inputs(schema, data, indices))
+
+    def Gemm(
+        self,
+        A: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+        B: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+        C: Optional[Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = None,
+        alpha: float = 1.0,
+        beta: float = 1.0,
+        transA: int = 0,
+        transB: int = 0,
+    ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]:
+        r"""[🌐 Gemm(11)](https://onnx.ai/onnx/operators/onnx__Gemm.html#gemm-11 "Online Documentation")
+
+        General Matrix multiplication:
+        https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3
+
+        A' = transpose(A) if transA else A
+
+        B' = transpose(B) if transB else B
+
+        Compute Y = alpha * A' * B' + beta * C, where input tensor A has shape (M, K) or (K, M),
+        input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N),
+        and output tensor Y has shape (M, N). A will be transposed before doing the
+        computation if attribute transA is non-zero, same for B and transB.
+        This operator supports **unidirectional broadcasting** (tensor C should be unidirectionally broadcastable to tensor A * B); for more details please check `Broadcasting in ONNX <https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md>`_.
+        This operator has **optional** inputs/outputs. See `ONNX <https://github.com/onnx/onnx/blob/main/docs/IR.md>`_ for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.
+
+
+        Args:
+            A: Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, M)
+                if transA is non-zero.
+
+            B: Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, K)
+                if transB is non-zero.
+
+            C: (optional) Input tensor C. If not specified, the computation is
+                done as if C is a scalar 0. The shape of C should be unidirectionally
+                broadcastable to (M, N).
+
+            alpha: Scalar multiplier for the product of input tensors A * B.
+
+            beta: Scalar multiplier for input tensor C.
+ + transA: Whether A should be transposed + + transB: Whether B should be transposed + """ + + schema = get_schema("Gemm", 11, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op( + self, "Gemm", schema + ) + return op( + *self._prepare_inputs(schema, A, B, C), + alpha=alpha, + beta=beta, + transA=transA, + transB=transB, + ) + + def Hardmax( + self, input: Union[DOUBLE, FLOAT, FLOAT16], axis: int = 1 + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Hardmax(11)](https://onnx.ai/onnx/operators/onnx__Hardmax.html#hardmax-11 "Online Documentation") + + + The operator computes the hardmax (1 for the first maximum value, and 0 for all others) values for each layer in the batch + of the given input. + + The input does not need to explicitly be a 2D vector; rather, it will be + coerced into one. For an arbitrary n-dimensional tensor + input \in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}] and k is + the axis provided, then input will be coerced into a 2-dimensional tensor with + dimensions [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]. For the default + case where axis=1, this means the input tensor will be coerced into a 2D tensor + of dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size. + In this situation, we must have a_0 = N and a_1 * ... * a_{n-1} = D. + Each of these dimensions must be matched correctly, or else the operator + will throw errors. The output tensor has the same shape + and contains the hardmax values of the corresponding input. + + + Args: + input: The input tensor that's coerced into a 2D matrix of size (NxD) as + described above. + + axis: Describes the axis of the inputs when coerced to 2D; defaults to one + because the 0th axis most likely describes the batch_size. Negative + value means counting dimensions from the back. Accepted range is [-r, + r-1] where r = rank(input). + """ + + schema = get_schema("Hardmax", 11, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Hardmax", schema) + return op(*self._prepare_inputs(schema, input), axis=axis) + + def If( + self, + cond: BOOL, + else_branch: Optional[GraphProto] = None, + then_branch: Optional[GraphProto] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 If(11)](https://onnx.ai/onnx/operators/onnx__If.html#if-11 "Online Documentation") + + If conditional + + Args: + cond: Condition for the if + + else_branch: Graph to run if condition is false. Has N outputs: values you + wish to be live-out to the enclosing scope. The number of outputs must + match the number of outputs in the then_branch. + + then_branch: Graph to run if condition is true. Has N outputs: values you + wish to be live-out to the enclosing scope. The number of outputs must + match the number of outputs in the else_branch. 
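+
+        The eager semantics reduce to an ordinary conditional (an illustrative
+        sketch, not the runtime implementation):
+        ::
+
+            outputs = then_branch() if cond else else_branch()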
+ """ + + schema = get_schema("If", 11, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "If", schema) + return op( + *self._prepare_inputs(schema, cond), + else_branch=else_branch, + then_branch=then_branch, + ) + + def LogSoftmax( + self, input: Union[DOUBLE, FLOAT, FLOAT16], axis: int = 1 + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 LogSoftmax(11)](https://onnx.ai/onnx/operators/onnx__LogSoftmax.html#logsoftmax-11 "Online Documentation") + + + The operator computes the logsoftmax (log of softmax) values for each layer in the batch + of the given input. + + The input does not need to explicitly be a 2D vector; rather, it will be + coerced into one. For an arbitrary n-dimensional tensor + input \in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}] and k is + the axis provided, then input will be coerced into a 2-dimensional tensor with + dimensions [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]. For the default + case where axis=1, this means the input tensor will be coerced into a 2D tensor + of dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size. + In this situation, we must have a_0 = N and a_1 * ... * a_{n-1} = D. + Each of these dimensions must be matched correctly, or else the operator + will throw errors. The output tensor has the same shape + and contains the logsoftmax values of the corresponding input. + + + Args: + input: The input tensor that's coerced into a 2D matrix of size (NxD) as + described above. + + axis: Describes the axis of the inputs when coerced to 2D; defaults to one + because the 0th axis most likely describes the batch_size. Negative + value means counting dimensions from the back. Accepted range is [-r, + r-1] where r = rank(input). + """ + + schema = get_schema("LogSoftmax", 11, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "LogSoftmax", schema) + return op(*self._prepare_inputs(schema, input), axis=axis) + + def Loop( + self, + M: Optional[INT64], + cond: Optional[BOOL], + *v_initial: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + body: Optional[GraphProto] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Loop(11)](https://onnx.ai/onnx/operators/onnx__Loop.html#loop-11 "Online Documentation") + + + Generic Looping construct. This loop has multiple termination conditions: + + 1) Trip count. Iteration count specified at runtime. Set by + specifying the input M. Optional. Set to empty string to omit. + Note that a static trip count (specified at graph construction time) can be + specified by passing in a constant node for input M. + 2) Loop termination condition. This is an input to the op that determines + whether to run the first iteration and also a loop-carried dependency for + the body graph. The body graph must yield a value for the condition variable, + whether this input is provided or not. + + This table summarizes the operating modes of this operator with equivalent + C-style code: + + Operator inputs defined as (max_trip_count, condition_var). + + input ("", ""): + for (int i=0; ; ++i) { + cond = ... 
// Note this value is ignored, but is required in the body
+                }
+
+            input ("", cond) // Note this is analogous to a while loop
+                bool cond = ...;
+                for (int i=0; cond; ++i) {
+                    cond = ...;
+                }
+
+            input ("", 1) // Note this is analogous to a do-while loop
+                bool cond = true;
+                for (int i=0; cond; ++i) {
+                    cond = ...;
+                }
+
+            input (trip_count, "") // Note this is analogous to a for loop
+                int trip_count = ...;
+                for (int i=0; i < trip_count; ++i) {
+                    cond = ...; // ignored
+                }
+
+            input (trip_count, cond)
+                int trip_count = ...;
+                bool cond = ...;
+                for (int i=0; i < trip_count && cond; ++i) {
+                    cond = ...;
+                }
+
+
+        *Sample usage - cond as well as trip count*
+
+            graph predict-net {
+                %a = Constant[value = <Scalar Tensor [3]>]()
+                %b = Constant[value = <Scalar Tensor [6]>]()
+                %keepgoing = Constant[value = <Scalar Tensor [true]>]()
+                %max_trip_count = Constant[value = <Scalar Tensor [10]>]()
+                %keepgoing_out, %b_out, %user_defined_vals = Loop[body = <graph body-net>](%max_trip_count, %keepgoing, %b)
+                return
+            }
+
+            graph body-net (
+                %i[INT32, scalar]           // iteration number
+                %keepgoing_in[BOOL, scalar] // incoming loop-termination-condition; not used
+                %b_in[INT32, scalar]        // incoming value of loop-carried-dependency b
+            ) {
+                %my_local = Add(%a, %b_in)
+                %b_out = Sub(%a, %b_in)                     // outgoing value of loop-carried-dependency b
+                %keepgoing_out = Greater(%my_local, %b_out) // outgoing loop-termination-condition
+                %user_defined_val = Add(%b_in, %b_in)       // scan-output value to be accumulated
+                return %keepgoing_out, %b_out, %user_defined_val
+            }
+
+        *Sample equivalent C code*
+
+            {
+                /* User-defined code (enclosing scope) */
+                int a = 3, b = 6;
+                bool keepgoing = true; // Analogous to input cond
+                /* End user-defined code */
+
+                /* Implicitly-defined code */
+                const int max_trip_count = 10; // Analogous to input M
+                int user_defined_vals[]; // Imagine this is resizable
+                /* End implicitly-defined code */
+                /* initialize loop-carried variables and scan-output variables */
+                bool keepgoing_out = keepgoing;
+                int b_out = b;
+
+                for (int i=0; i < max_trip_count && keepgoing_out; ++i) {
+                    /* Implicitly-defined code: bind actual parameter values
+                       to formal parameter variables of loop-body */
+                    bool keepgoing_in = keepgoing_out;
+                    int b_in = b_out;
+
+                    /* User-defined code (loop body) */
+                    int my_local = a + b_in; // Reading value "a" from the enclosing scope is fine
+                    b_out = a - b_in;
+                    keepgoing_out = my_local > b_out;
+                    user_defined_val = b_in + b_in; // b_in and b_out are different variables
+                    /* End user-defined code */
+
+                    /* Implicitly defined-code */
+                    user_defined_vals[i] = user_defined_val; // accumulate scan-output values
+                }
+                // int t = my_local; // Can't do this. my_local is not accessible here.
+
+                // The values below are bound to the output variables of the loop and therefore accessible
+                // b_out; user_defined_vals; keepgoing_out;
+            }
+
+        There are several things of note in this code snippet:
+
+        1) Values from the enclosing scope (i.e. variable "a" here) are in scope and can
+            be referenced in the inputs of the loop.
+        2) Any values computed in the loop body that need to be used in a subsequent
+            iteration or after the loop are modelled using a pair of variables in the loop-body,
+            consisting of an input variable (eg., b_in) and an output variable (eg., b_out).
+            These are referred to as loop-carried dependences. The loop operation node
+            supplies the input value of the input variable for the first iteration, and
+            returns the output value of the output variable produced by the final
+            iteration.
+ 3) Scan_output variables are used to implicitly concatenate values computed across + all the iterations. In the above example, the value of user_defined_val computed + over all iterations are concatenated and returned as the value of user_defined_vals + after the loop. + 4) Values created in the body cannot be accessed in the enclosing scope, + except using the mechanism described above. + + Note that the semantics of this op support "diagonal" or "wavefront" execution. + (See Step 3 here for an example: + https://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/). + Frontends should emit multi-layer RNNs as a series of While operators (with + time being the inner looping dimension), with each successive layer consuming + the scan_outputs from the previous layer, possibly going through several + point-wise operators (e.g. dropout, residual connections, linear layer). + + + Args: + M: (optional) A maximum trip-count for the loop specified at runtime. + Optional. Pass empty string to skip. + + cond: (optional) A boolean termination condition. Optional. Pass empty + string to skip. + + v_initial: (variadic, heterogeneous) The initial values of any loop-carried + dependencies (values that change across loop iterations) + + body: The graph run each iteration. It has 2+N inputs: (iteration_num, + condition, loop carried dependencies...). It has 1+N+K outputs: + (condition, loop carried dependencies..., scan_outputs...). Each + scan_output is created by concatenating the value of the specified + output value at the end of each iteration of the loop. It is an error if + the dimensions or data type of these scan_outputs change across loop + iterations. + """ + + schema = get_schema("Loop", 11, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Loop", schema) + return op(*self._prepare_inputs(schema, M, cond, *v_initial), body=body) + + def LpPool( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + auto_pad: str = "NOTSET", + kernel_shape: Optional[Sequence[int]] = None, + p: int = 2, + pads: Optional[Sequence[int]] = None, + strides: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 LpPool(11)](https://onnx.ai/onnx/operators/onnx__LpPool.html#lppool-11 "Online Documentation") + + + LpPool consumes an input tensor X and applies Lp pooling across + the tensor according to kernel sizes, stride sizes, and pad lengths. + Lp pooling consisting of computing the Lp norm on all values of a subset + of the input tensor according to the kernel size and downsampling the + data into the output tensor Y for further processing. + + Args: + X: (differentiable) Input data tensor from the previous operator; dimensions + for image case are (N x C x H x W), where N is the batch size, C is the + number of channels, and H and W are the height and the width of the + data. For non image case, the dimensions are in the form of (N x C x D1 + x D2 ... Dn), where N is the batch size. + + auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. + Where default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = + ceil(input_shape[i] / strides[i])` for each axis `i`. The padding is + split between the two sides equally or almost equally (depending on + whether it is even or odd). 
In case the padding is an odd number, the + extra padding is added at the end for SAME_UPPER and at the beginning + for SAME_LOWER. + + kernel_shape: The size of the kernel along each axis. + + p: p value of the Lp norm used to pool over the input data. + + pads: Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. `pads` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis `i` and xi_end, the number of pixels + added at the end of axis `i`. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. + + strides: Stride along each spatial axis. If not present, the stride defaults + to 1 along each spatial axis. + """ + + schema = get_schema("LpPool", 11, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "LpPool", schema) + return op( + *self._prepare_inputs(schema, X), + auto_pad=auto_pad, + kernel_shape=kernel_shape, + p=p, + pads=pads, + strides=strides, + ) + + def MaxPool( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + auto_pad: str = "NOTSET", + ceil_mode: int = 0, + dilations: Optional[Sequence[int]] = None, + kernel_shape: Optional[Sequence[int]] = None, + pads: Optional[Sequence[int]] = None, + storage_order: int = 0, + strides: Optional[Sequence[int]] = None, + ) -> Tuple[Union[DOUBLE, FLOAT, FLOAT16], INT64]: + r"""[🌐 MaxPool(11)](https://onnx.ai/onnx/operators/onnx__MaxPool.html#maxpool-11 "Online Documentation") + + + MaxPool consumes an input tensor X and applies max pooling across + the tensor according to kernel sizes, stride sizes, and pad lengths. + max pooling consisting of computing the max on all values of a + subset of the input tensor according to the kernel size and downsampling the + data into the output tensor Y for further processing. The output spatial shape will be following: + ``` + output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1) + ``` + or + ``` + output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1) + ``` + if ceil_mode is enabled + + ``` + * pad_shape[i] is sum of pads along axis i + ``` + + `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following: + ``` + VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i]) + SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i]) + ``` + And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`: + ``` + pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i] + ``` + The output of each pooling window is maximum number of elements exclude pad. + + + Args: + X: Input data tensor from the previous operator; dimensions for image case + are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. For non + image case, the dimensions are in the form of (N x C x D1 x D2 ... 
Dn), + where N is the batch size. Optionally, if dimension denotation is in + effect, the operation expects the input data tensor to arrive with the + dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, + DATA_FEATURE ...]. + + auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. + Where default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial + size match the input.In case of odd number add the extra padding at the + end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no + padding. + + ceil_mode: Whether to use ceil or floor (default) to compute the output + shape. + + dilations: Dilation value along each spatial axis of filter. If not present, + the dilation defaults to 1 along each spatial axis. + + kernel_shape: The size of the kernel along each axis. + + pads: Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. `pads` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis `i` and xi_end, the number of pixels + added at the end of axis `i`. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. + + storage_order: The storage order of the tensor. 0 is row major, and 1 is + column major. + + strides: Stride along each spatial axis. If not present, the stride defaults + to 1 along each spatial axis. + """ + + schema = get_schema("MaxPool", 11, "") + op: Callable[..., Tuple[Union[DOUBLE, FLOAT, FLOAT16], INT64]] = Op( + self, "MaxPool", schema + ) + return op( + *self._prepare_inputs(schema, X), + auto_pad=auto_pad, + ceil_mode=ceil_mode, + dilations=dilations, + kernel_shape=kernel_shape, + pads=pads, + storage_order=storage_order, + strides=strides, + ) + + def MaxUnpool( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + I: INT64, + output_shape: Optional[INT64] = None, + kernel_shape: Optional[Sequence[int]] = None, + pads: Optional[Sequence[int]] = None, + strides: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 MaxUnpool(11)](https://onnx.ai/onnx/operators/onnx__MaxUnpool.html#maxunpool-11 "Online Documentation") + + + MaxUnpool essentially computes the partial inverse of the MaxPool op. + The input information to this op is typically the output information from a MaxPool op. The first + input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output) + from MaxPool. The second input tensor, I, contains the indices to the (locally maximal) elements corrsponding + to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op. + The third (optional) input is a tensor that specifies the output size of the unpooling operation. + + MaxUnpool is intended to do 'partial' inverse of the MaxPool op. 'Partial' because all the non-maximal + values from the original input to MaxPool are set to zero in the output of the MaxUnpool op. Pooling + the result of an unpooling operation should give back the original input to the unpooling op. + + MaxUnpool can produce the same output size for several input sizes, which makes unpooling op ambiguous. 
+ The third input argument, output_size, is meant to disambiguate the op and produce output tensor of + known/predictable size. + + In addition to the inputs, MaxUnpool takes three attributes, namely kernel_shape, strides, and pads, + which define the exact unpooling op. The attributes typically have the same values as the corrsponding + pooling op that the unpooling op is trying to invert. + + + Args: + X: (differentiable) Input data tensor that has to be unpooled. This tensor + is typically the first output of the MaxPool op.Dimensions for image + case are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. For + non-image case, the dimensions are in the form of (N x C x D1 x D2 ... + Dn), where N is the batch size. Optionally, if dimension denotation is + in effect, the operation expects the input data tensor to arrive with + the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, + DATA_FEATURE ...]. + + I: (non-differentiable) Input data tensor containing the indices + corresponding to elements in the first input tensor X.This tensor is + typically the second output of the MaxPool op.Dimensions must be the + same as input tensor X. The indices are linear, i.e. computed + considering the tensor as flattened 1-D tensor, assuming row-major + storage. Also, the linear indices should not consider padding. So the + values in indices are in the range [0, N x C x D1 x ... x Dn). + + output_shape: (optional, non-differentiable) The shape of the output can be + explicitly set which will cause pads values to be auto generated. If + 'output_shape' is specified, 'pads' values are ignored. + + kernel_shape: The size of the kernel along each axis. + + pads: Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. `pads` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis `i` and xi_end, the number of pixels + added at the end of axis `i`. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. + + strides: Stride along each spatial axis. If not present, the stride defaults + to 1 along each spatial axis. + """ + + schema = get_schema("MaxUnpool", 11, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "MaxUnpool", schema) + return op( + *self._prepare_inputs(schema, X, I, output_shape), + kernel_shape=kernel_shape, + pads=pads, + strides=strides, + ) + + def NonMaxSuppression( + self, + boxes: FLOAT, + scores: FLOAT, + max_output_boxes_per_class: Optional[INT64] = None, + iou_threshold: Optional[FLOAT] = None, + score_threshold: Optional[FLOAT] = None, + center_point_box: int = 0, + ) -> INT64: + r"""[🌐 NonMaxSuppression(11)](https://onnx.ai/onnx/operators/onnx__NonMaxSuppression.html#nonmaxsuppression-11 "Online Documentation") + + + Filter out boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. + Bounding boxes with score less than score_threshold are removed. Bounding box format is indicated by attribute center_point_box. 
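+
+        A minimal call sketch (assuming `op` is an Opset11 instance; names are
+        illustrative):
+        ::
+
+            # selected_indices has shape [num_selected_indices, 3]; each row is
+            # [batch_index, class_index, box_index]
+            selected_indices = op.NonMaxSuppression(
+                boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold
+            )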
+ Note that this algorithm is agnostic to where the origin is in the coordinate system and more generally is invariant to + orthogonal transformations and translations of the coordinate system; thus translating or reflections of the coordinate system + result in the same boxes being selected by the algorithm. + The selected_indices output is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. + The bounding box coordinates corresponding to the selected indices can then be obtained using the Gather or GatherND operation. + + + Args: + boxes: An input tensor with shape [num_batches, spatial_dimension, 4]. The + single box data format is indicated by center_point_box. + + scores: An input tensor with shape [num_batches, num_classes, + spatial_dimension] + + max_output_boxes_per_class: (optional) Integer representing the maximum + number of boxes to be selected per batch per class. It is a scalar. + Default to 0, which means no output. + + iou_threshold: (optional) Float representing the threshold for deciding + whether boxes overlap too much with respect to IOU. It is scalar. Value + range [0, 1]. Default to 0. + + score_threshold: (optional) Float representing the threshold for deciding + when to remove boxes based on score. It is a scalar. + + center_point_box: Integer indicate the format of the box data. The default + is 0. 0 - the box data is supplied as [y1, x1, y2, x2] where (y1, x1) + and (y2, x2) are the coordinates of any diagonal pair of box corners and + the coordinates can be provided as normalized (i.e., lying in the + interval [0, 1]) or absolute. Mostly used for TF models. 1 - the box + data is supplied as [x_center, y_center, width, height]. Mostly used for + Pytorch models. + """ + + schema = get_schema("NonMaxSuppression", 11, "") + op: Callable[..., INT64] = Op(self, "NonMaxSuppression", schema) + return op( + *self._prepare_inputs( + schema, + boxes, + scores, + max_output_boxes_per_class, + iou_threshold, + score_threshold, + ), + center_point_box=center_point_box, + ) + + def OneHot( + self, + indices: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + depth: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + values: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axis: int = -1, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 OneHot(11)](https://onnx.ai/onnx/operators/onnx__OneHot.html#onehot-11 "Online Documentation") + + + Produces a one-hot tensor based on inputs. + The locations represented by the index values in the 'indices' input tensor will have 'on_value' + and the other locations will have 'off_value' in the output tensor, where 'on_value' and 'off_value' + are specified as part of required input argument 'values', which is a two-element tensor of format + [off_value, on_value]. The rank of the output tensor will be one greater than the rank of the + input tensor. The additional dimension is for one-hot representation. The additional dimension will + be inserted at the position specified by 'axis'. If 'axis' is not specified then then additional + dimension will be inserted as the innermost dimension, i.e. axis=-1. 
The size of the additional + dimension is specified by required scalar input 'depth'. The type of the output tensor is the same + as the type of the 'values' input. Any entries in the 'indices' input tensor with values outside + the range [-depth, depth-1] will result in one-hot representation with all 'off_value' values in the + output tensor. + + when axis = 0: + output[input[i, j, k], i, j, k] = 1 for all i, j, k and 0 otherwise. + + when axis = -1: + output[i, j, k, input[i, j, k]] = 1 for all i, j, k and 0 otherwise. + + + + Args: + indices: (non-differentiable) Input tensor containing indices. Any entries + in the 'indices' input tensor with values outside the range [-depth, + depth-1] will result in one-hot representation with all 'off_value' + values in the output tensor.In case 'indices' is of non-integer type, + the values will be casted to int64 before use. + + depth: (non-differentiable) Scalar specifying the number of classes in + one-hot tensor. This is also the size of the one-hot dimension + (specified by 'axis' attribute) added on in the output tensor. The + values in the 'indices' input tensor are expected to be in the range + [-depth, depth-1]. In case 'depth' is of non-integer type, it will be + casted to int64 before use. + + values: (non-differentiable) Rank 1 tensor containing exactly two elements, + in the format [off_value, on_value], where 'on_value' is the value used + for filling locations specified in 'indices' input tensor, and + 'off_value' is the value used for filling locations other than those + specified in 'indices' input tensor. + + axis: (Optional) Axis along which one-hot representation in added. Default: + axis=-1. axis=-1 means that the additional dimension will be inserted as + the innermost/last dimension in the output tensor. Negative value means + counting dimensions from the back. Accepted range is [-r-1, r] where r = + rank(indices). + """ + + schema = get_schema("OneHot", 11, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "OneHot", schema) + return op(*self._prepare_inputs(schema, indices, depth, values), axis=axis) + + def Pad( + self, + data: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + pads: INT64, + constant_value: Optional[ + Union[ + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ] + ] = None, + mode: str = "constant", + ) -> Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ]: + r"""[🌐 Pad(11)](https://onnx.ai/onnx/operators/onnx__Pad.html#pad-11 "Online Documentation") + + + Given a tensor containing the data to be padded (`data`), a tensor containing the number of start and end pad values for axis (`pads`), (optionally) a `mode`, and (optionally) `constant_value`, + a padded tensor (`output`) is generated. + + The three supported `modes` are (similar to corresponding modes supported by `numpy.pad`): + + 1) `constant`(default) - pads with a given constant value as specified by `constant_value` (which defaults to 0) + + 2) `reflect` - pads with the reflection of the vector mirrored on the first and last values of the vector along each axis + + 3) `edge` - pads with the edge values of array + + + Example 1 (`constant` mode): + Insert 0 pads to the beginning of the second dimension. 
+
+            data =
+            [
+                [1.0, 1.2],
+                [2.3, 3.4],
+                [4.5, 5.7],
+            ]
+
+            pads = [0, 2, 0, 0]
+
+            mode = 'constant'
+
+            constant_value = 0.0
+
+            output =
+            [
+                [0.0, 0.0, 1.0, 1.2],
+                [0.0, 0.0, 2.3, 3.4],
+                [0.0, 0.0, 4.5, 5.7],
+            ]
+
+
+        Example 2 (`reflect` mode):
+            data =
+            [
+                [1.0, 1.2],
+                [2.3, 3.4],
+                [4.5, 5.7],
+            ]
+
+            pads = [0, 2, 0, 0]
+
+            mode = 'reflect'
+
+            output =
+            [
+                [1.0, 1.2, 1.0, 1.2],
+                [2.3, 3.4, 2.3, 3.4],
+                [4.5, 5.7, 4.5, 5.7],
+            ]
+
+
+        Example 3 (`edge` mode):
+            data =
+            [
+                [1.0, 1.2],
+                [2.3, 3.4],
+                [4.5, 5.7],
+            ]
+
+            pads = [0, 2, 0, 0]
+
+            mode = 'edge'
+
+            output =
+            [
+                [1.0, 1.0, 1.0, 1.2],
+                [2.3, 2.3, 2.3, 3.4],
+                [4.5, 4.5, 4.5, 5.7],
+            ]
+
+
+        Args:
+            data: Input tensor.
+
+            pads: Tensor of integers indicating the number of padding elements to add or
+                remove (if negative) at the beginning and end of each axis. For 2D input
+                tensor, it is the number of pixels. `pads` should be a 1D tensor of
+                shape [2 * input_rank]. `pads` format should be: [x1_begin,
+                x2_begin,...,x1_end, x2_end,...], where xi_begin is the number of pad
+                values added at the beginning of axis `i` and xi_end, the number of pad
+                values added at the end of axis `i`.
+
+            constant_value: (optional) A scalar value to be used if the mode chosen is
+                `constant` (by default it is 0).
+
+            mode: Supported modes: `constant`(default), `reflect`, `edge`
+        """
+
+        schema = get_schema("Pad", 11, "")
+        op: Callable[
+            ...,
+            Union[
+                DOUBLE,
+                FLOAT,
+                FLOAT16,
+                INT16,
+                INT32,
+                INT64,
+                INT8,
+                UINT16,
+                UINT32,
+                UINT64,
+                UINT8,
+            ],
+        ] = Op(self, "Pad", schema)
+        return op(*self._prepare_inputs(schema, data, pads, constant_value), mode=mode)
+
+    def Range(
+        self,
+        start: Union[DOUBLE, FLOAT, INT16, INT32, INT64],
+        limit: Union[DOUBLE, FLOAT, INT16, INT32, INT64],
+        delta: Union[DOUBLE, FLOAT, INT16, INT32, INT64],
+    ) -> Union[DOUBLE, FLOAT, INT16, INT32, INT64]:
+        r"""[🌐 Range(11)](https://onnx.ai/onnx/operators/onnx__Range.html#range-11 "Online Documentation")
+
+
+        Generate a tensor containing a sequence of numbers that begin at `start` and extends by increments of `delta`
+        up to `limit` (exclusive).
+
+        The number of elements in the output of range is computed as below:
+
+        `number_of_elements = max( ceil( (limit - start) / delta ) , 0 )`
+
+        The pseudocode determining the contents of the output is shown below:
+
+        `for(int i=0; i<number_of_elements; ++i) { output[i] = start + (i * delta); }`
+
+        `Example 1`
+
+            Inputs: start = 3, limit = 9, delta = 3
+
+            Output: [3, 6]
+
+        `Example 2`
+
+            Inputs: start = 10, limit = 4, delta = -2
+
+            Output: [10, 8, 6]
+
+
+        Args:
+            start: Scalar. First entry for the range of output values.
+
+            limit: Scalar. Exclusive upper limit for the range of output values.
+
+            delta: Scalar. Value to step by.
+        """
+
+        schema = get_schema("Range", 11, "")
+        op: Callable[..., Union[DOUBLE, FLOAT, INT16, INT32, INT64]] = Op(
+            self, "Range", schema
+        )
+        return op(*self._prepare_inputs(schema, start, limit, delta))
+
+    def ReduceL1(
+        self,
+        data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+        axes: Optional[Sequence[int]] = None,
+        keepdims: int = 1,
+    ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]:
+        r"""[🌐 ReduceL1(11)](https://onnx.ai/onnx/operators/onnx__ReduceL1.html#reducel1-11 "Online Documentation")
+
+
+        Computes the L1 norm of the input tensor's elements along the provided axes. The resulting
+        tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then
+        the resulting tensor has the reduced dimensions pruned.
+
+        The above behavior is similar to numpy, with the exception that numpy defaults keepdims to
+        False instead of True.
+
+        Args:
+            data: An input tensor.
+
+            axes: A list of integers, along which to reduce. The default is to reduce
+                over all the dimensions of the input tensor. Accepted range is [-r, r-1]
+                where r = rank(data).
+
+            keepdims: Keep the reduced dimension or not, default 1 means keep reduced
+                dimension.
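+
+        A NumPy reference for this reduction (illustrative only):
+        ::
+
+            axis = None if axes is None else tuple(axes)
+            output = np.sum(np.abs(data), axis=axis, keepdims=bool(keepdims))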
+        """
+
+        schema = get_schema("ReduceL1", 11, "")
+        op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op(
+            self, "ReduceL1", schema
+        )
+        return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims)
+
+    def ReduceL2(
+        self,
+        data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+        axes: Optional[Sequence[int]] = None,
+        keepdims: int = 1,
+    ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]:
+        r"""[🌐 ReduceL2(11)](https://onnx.ai/onnx/operators/onnx__ReduceL2.html#reducel2-11 "Online Documentation")
+
+
+        Computes the L2 norm of the input tensor's elements along the provided axes. The resulting
+        tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then
+        the resulting tensor has the reduced dimensions pruned.
+
+        The above behavior is similar to numpy, with the exception that numpy defaults keepdims to
+        False instead of True.
+
+        Args:
+            data: An input tensor.
+
+            axes: A list of integers, along which to reduce. The default is to reduce
+                over all the dimensions of the input tensor. Accepted range is [-r, r-1]
+                where r = rank(data).
+
+            keepdims: Keep the reduced dimension or not, default 1 means keep reduced
+                dimension.
+        """
+
+        schema = get_schema("ReduceL2", 11, "")
+        op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op(
+            self, "ReduceL2", schema
+        )
+        return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims)
+
+    def ReduceLogSum(
+        self,
+        data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+        axes: Optional[Sequence[int]] = None,
+        keepdims: int = 1,
+    ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]:
+        r"""[🌐 ReduceLogSum(11)](https://onnx.ai/onnx/operators/onnx__ReduceLogSum.html#reducelogsum-11 "Online Documentation")
+
+
+        Computes the log sum of the input tensor's elements along the provided axes. The resulting
+        tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then
+        the resulting tensor has the reduced dimensions pruned.
+
+        The above behavior is similar to numpy, with the exception that numpy defaults keepdims to
+        False instead of True.
+
+        Args:
+            data: An input tensor.
+
+            axes: A list of integers, along which to reduce. The default is to reduce
+                over all the dimensions of the input tensor. Accepted range is [-r, r-1]
+                where r = rank(data).
+
+            keepdims: Keep the reduced dimension or not, default 1 means keep reduced
+                dimension.
+        """
+
+        schema = get_schema("ReduceLogSum", 11, "")
+        op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op(
+            self, "ReduceLogSum", schema
+        )
+        return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims)
+
+    def ReduceLogSumExp(
+        self,
+        data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+        axes: Optional[Sequence[int]] = None,
+        keepdims: int = 1,
+    ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]:
+        r"""[🌐 ReduceLogSumExp(11)](https://onnx.ai/onnx/operators/onnx__ReduceLogSumExp.html#reducelogsumexp-11 "Online Documentation")
+
+
+        Computes the log sum exponent of the input tensor's elements along the provided axes. The resulting
+        tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then
+        the resulting tensor has the reduced dimensions pruned.
+
+        The above behavior is similar to numpy, with the exception that numpy defaults keepdims to
+        False instead of True.
+
+        Args:
+            data: An input tensor.
+
+            axes: A list of integers, along which to reduce. The default is to reduce
+                over all the dimensions of the input tensor. Accepted range is [-r, r-1]
+                where r = rank(data).
+
+            keepdims: Keep the reduced dimension or not, default 1 means keep reduced
+                dimension.
+        """
+
+        schema = get_schema("ReduceLogSumExp", 11, "")
+        op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op(
+            self, "ReduceLogSumExp", schema
+        )
+        return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims)
+
+    def ReduceMax(
+        self,
+        data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+        axes: Optional[Sequence[int]] = None,
+        keepdims: int = 1,
+    ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]:
+        r"""[🌐 ReduceMax(11)](https://onnx.ai/onnx/operators/onnx__ReduceMax.html#reducemax-11 "Online Documentation")
+
+
+        Computes the max of the input tensor's elements along the provided axes. The resulting
+        tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then
+        the resulting tensor has the reduced dimensions pruned.
+
+        The above behavior is similar to numpy, with the exception that numpy defaults keepdims to
+        False instead of True.
+
+        Args:
+            data: An input tensor.
+
+            axes: A list of integers, along which to reduce. The default is to reduce
+                over all the dimensions of the input tensor. Accepted range is [-r, r-1]
+                where r = rank(data).
+
+            keepdims: Keep the reduced dimension or not, default 1 means keep reduced
+                dimension.
+        """
+
+        schema = get_schema("ReduceMax", 11, "")
+        op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op(
+            self, "ReduceMax", schema
+        )
+        return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims)
+
+    def ReduceMean(
+        self,
+        data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+        axes: Optional[Sequence[int]] = None,
+        keepdims: int = 1,
+    ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]:
+        r"""[🌐 ReduceMean(11)](https://onnx.ai/onnx/operators/onnx__ReduceMean.html#reducemean-11 "Online Documentation")
+
+
+        Computes the mean of the input tensor's elements along the provided axes. The resulting
+        tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then
+        the resulting tensor has the reduced dimensions pruned.
+
+        The above behavior is similar to numpy, with the exception that numpy defaults keepdims to
+        False instead of True.
+
+        Args:
+            data: An input tensor.
+
+            axes: A list of integers, along which to reduce. The default is to reduce
+                over all the dimensions of the input tensor. Accepted range is [-r, r-1]
+                where r = rank(data).
+
+            keepdims: Keep the reduced dimension or not, default 1 means keep reduced
+                dimension.
+        """
+
+        schema = get_schema("ReduceMean", 11, "")
+        op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op(
+            self, "ReduceMean", schema
+        )
+        return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims)
+
+    def ReduceMin(
+        self,
+        data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+        axes: Optional[Sequence[int]] = None,
+        keepdims: int = 1,
+    ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]:
+        r"""[🌐 ReduceMin(11)](https://onnx.ai/onnx/operators/onnx__ReduceMin.html#reducemin-11 "Online Documentation")
+
+
+        Computes the min of the input tensor's elements along the provided axes. The resulting
+        tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then
+        the resulting tensor has the reduced dimensions pruned.
+
+        The above behavior is similar to numpy, with the exception that numpy defaults keepdims to
+        False instead of True.
+
+        Args:
+            data: An input tensor.
+
+            axes: A list of integers, along which to reduce. The default is to reduce
+                over all the dimensions of the input tensor. Accepted range is [-r, r-1]
+                where r = rank(data).
+
+            keepdims: Keep the reduced dimension or not, default 1 means keep reduced
+                dimension.
+        """
+
+        schema = get_schema("ReduceMin", 11, "")
+        op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op(
+            self, "ReduceMin", schema
+        )
+        return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims)
+
+    def ReduceProd(
+        self,
+        data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+        axes: Optional[Sequence[int]] = None,
+        keepdims: int = 1,
+    ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]:
+        r"""[🌐 ReduceProd(11)](https://onnx.ai/onnx/operators/onnx__ReduceProd.html#reduceprod-11 "Online Documentation")
+
+
+        Computes the product of the input tensor's elements along the provided axes. The resulting
+        tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then
+        the resulting tensor has the reduced dimensions pruned.
+
+        The above behavior is similar to numpy, with the exception that numpy defaults keepdims to
+        False instead of True.
+
+        Args:
+            data: An input tensor.
+
+            axes: A list of integers, along which to reduce. The default is to reduce
+                over all the dimensions of the input tensor. Accepted range is [-r, r-1]
+                where r = rank(data).
+
+            keepdims: Keep the reduced dimension or not, default 1 means keep reduced
+                dimension.
+        """
+
+        schema = get_schema("ReduceProd", 11, "")
+        op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op(
+            self, "ReduceProd", schema
+        )
+        return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims)
+
+    def ReduceSum(
+        self,
+        data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+        axes: Optional[Sequence[int]] = None,
+        keepdims: int = 1,
+    ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]:
+        r"""[🌐 ReduceSum(11)](https://onnx.ai/onnx/operators/onnx__ReduceSum.html#reducesum-11 "Online Documentation")
+
+
+        Computes the sum of the input tensor's elements along the provided axes. The resulting
+        tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then
+        the resulting tensor has the reduced dimensions pruned.
+
+        The above behavior is similar to numpy, with the exception that numpy defaults keepdims to
+        False instead of True.
+
+        Args:
+            data: An input tensor.
+
+            axes: A list of integers, along which to reduce. The default is to reduce
+                over all the dimensions of the input tensor. Accepted range is [-r, r-1]
+                where r = rank(data).
+
+            keepdims: Keep the reduced dimension or not, default 1 means keep reduced
+                dimension.
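+
+        Shape effect of keepdims (illustrative):
+        ::
+
+            # data.shape == (2, 3, 4) and axes == [1]
+            # keepdims=1 -> output.shape == (2, 1, 4)
+            # keepdims=0 -> output.shape == (2, 4)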
+        """
+
+        schema = get_schema("ReduceSum", 11, "")
+        op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op(
+            self, "ReduceSum", schema
+        )
+        return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims)
+
+    def ReduceSumSquare(
+        self,
+        data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+        axes: Optional[Sequence[int]] = None,
+        keepdims: int = 1,
+    ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]:
+        r"""[🌐 ReduceSumSquare(11)](https://onnx.ai/onnx/operators/onnx__ReduceSumSquare.html#reducesumsquare-11 "Online Documentation")
+
+
+        Computes the sum square of the input tensor's elements along the provided axes. The resulting
+        tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then
+        the resulting tensor has the reduced dimensions pruned.
+
+        The above behavior is similar to numpy, with the exception that numpy defaults keepdims to
+        False instead of True.
+
+        Args:
+            data: An input tensor.
+
+            axes: A list of integers, along which to reduce. The default is to reduce
+                over all the dimensions of the input tensor. Accepted range is [-r, r-1]
+                where r = rank(data).
+
+            keepdims: Keep the reduced dimension or not, default 1 means keep reduced
+                dimension.
+        """
+
+        schema = get_schema("ReduceSumSquare", 11, "")
+        op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op(
+            self, "ReduceSumSquare", schema
+        )
+        return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims)
+
+    def Resize(
+        self,
+        X: Union[
+            BOOL,
+            COMPLEX128,
+            COMPLEX64,
+            DOUBLE,
+            FLOAT,
+            FLOAT16,
+            INT16,
+            INT32,
+            INT64,
+            INT8,
+            STRING,
+            UINT16,
+            UINT32,
+            UINT64,
+            UINT8,
+        ],
+        roi: Union[DOUBLE, FLOAT, FLOAT16],
+        scales: FLOAT,
+        sizes: Optional[INT64] = None,
+        coordinate_transformation_mode: str = "half_pixel",
+        cubic_coeff_a: float = -0.75,
+        exclude_outside: int = 0,
+        extrapolation_value: float = 0.0,
+        mode: str = "nearest",
+        nearest_mode: str = "round_prefer_floor",
+    ) -> Union[
+        BOOL,
+        COMPLEX128,
+        COMPLEX64,
+        DOUBLE,
+        FLOAT,
+        FLOAT16,
+        INT16,
+        INT32,
+        INT64,
+        INT8,
+        STRING,
+        UINT16,
+        UINT32,
+        UINT64,
+        UINT8,
+    ]:
+        r"""[🌐 Resize(11)](https://onnx.ai/onnx/operators/onnx__Resize.html#resize-11 "Online Documentation")
+
+
+        Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of a neighborhood (a.k.a. sampling locations) in the input tensor.
+        Each dimension value of the output tensor is:
+        output_dimension = floor(input_dimension * (roi_end - roi_start) * scale) if input "sizes" is not specified.
+
+
+        Args:
+            X: N-D tensor
+
+            roi: 1-D tensor given as [start1, ..., startN, end1, ..., endN], where N is
+                the rank of X. The RoIs' coordinates are normalized in the coordinate
+                system of the input image. It only takes effect when
+                coordinate_transformation_mode is "tf_crop_and_resize"
+
+            scales: The scale array along each dimension. It takes values greater than
+                0. If a value is less than 1, it's sampling down; otherwise, it's
+                upsampling. The number of elements of 'scales' should be the same as the
+                rank of input 'X'. If 'size' is needed, the user must set 'scales' to an
+                empty tensor.
+
+            sizes: (optional) The size of the output tensor. The number of elements of
+                'sizes' should be the same as the rank of input 'X'. May only be set if
+                'scales' is set to an empty tensor.
+
+            coordinate_transformation_mode: This attribute describes how to transform
+                the coordinate in the resized tensor to the coordinate in the original
+                tensor.
+
+                The coordinate of each dimension is transformed individually. Let's
+                describe a case using axis x as an example. Denote x_resized as the
+                coordinate of axis x in the resized tensor, x_original as the coordinate
+                of axis x in the original tensor, length_original as the length of the
+                original tensor in axis x, length_resized as the length of the resized
+                tensor in axis x, roi_x = (start_x, end_x) of the axis x in input "roi",
+                and scale = length_resized / length_original. Then:
+
+                if coordinate_transformation_mode is "half_pixel",
+                    x_original = (x_resized + 0.5) / scale - 0.5
+
+                if coordinate_transformation_mode is "pytorch_half_pixel",
+                    x_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0
+
+                if coordinate_transformation_mode is "align_corners",
+                    x_original = x_resized * (length_original - 1) / (length_resized - 1)
+
+                if coordinate_transformation_mode is "asymmetric",
+                    x_original = x_resized / scale
+
+                if coordinate_transformation_mode is "tf_half_pixel_for_nn",
+                    x_original = (x_resized + 0.5) / scale
+
+                if coordinate_transformation_mode is "tf_crop_and_resize",
+                    x_original = length_resized > 1
+                        ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1)
+                        : 0.5 * (start_x + end_x) * (length_original - 1)
+
+            cubic_coeff_a: The coefficient 'a' used in cubic interpolation. Two common
+                choices are -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch).
+                Check out Equation (4) in https://ieeexplore.ieee.org/document/1163711
+                for the details. This attribute is valid only if "mode" is "cubic".
+
+            exclude_outside: If set to 1, the weight of sampling locations outside the
+                tensor will be set to 0 and the weight will be renormalized so that
+                their sum is 1.0. The default value is 0.
+
+            extrapolation_value: When coordinate_transformation_mode is
+                "tf_crop_and_resize" and x_original is outside the range [0,
+                length_original - 1], this value is used as the corresponding output
+                value. Default is 0.0f.
+
+            mode: Three interpolation modes: nearest (default), linear and cubic. The
+                "linear" mode includes linear interpolation for 1D tensor and N-linear
+                interpolation for N-D tensor (for example, bilinear interpolation for 2D
+                tensor). The "cubic" mode includes cubic interpolation for 1D tensor and
+                N-cubic interpolation for N-D tensor (for example, bicubic interpolation
+                for 2D tensor).
+
+            nearest_mode: Four modes: round_prefer_floor (default, also known as round
+                half down), round_prefer_ceil (also known as round half up), floor,
+                ceil. Only used by nearest interpolation. It indicates how to get the
+                "nearest" pixel in the input tensor from x_original, so this attribute
+                is valid only if "mode" is "nearest".
+        """
+
+        schema = get_schema("Resize", 11, "")
+        op: Callable[
+            ...,
+            Union[
+                BOOL,
+                COMPLEX128,
+                COMPLEX64,
+                DOUBLE,
+                FLOAT,
+                FLOAT16,
+                INT16,
+                INT32,
+                INT64,
+                INT8,
+                STRING,
+                UINT16,
+                UINT32,
+                UINT64,
+                UINT8,
+            ],
+        ] = Op(self, "Resize", schema)
+        return op(
+            *self._prepare_inputs(schema, X, roi, scales, sizes),
+            coordinate_transformation_mode=coordinate_transformation_mode,
+            cubic_coeff_a=cubic_coeff_a,
+            exclude_outside=exclude_outside,
+            extrapolation_value=extrapolation_value,
+            mode=mode,
+            nearest_mode=nearest_mode,
+        )
+
+    def Round(self, X: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]:
+        r"""[🌐 Round(11)](https://onnx.ai/onnx/operators/onnx__Round.html#round-11 "Online Documentation")
+
+
+        Round takes one input Tensor and rounds the values, element-wise, meaning
+        it finds the nearest integer for each value.
+        In case of halves, the rule is to round them to the nearest even integer.
+        The output tensor has the same shape and type as the input.
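+
+        This matches NumPy's half-to-even ("banker's") rounding, so a reference is
+        simply (illustrative only):
+        ::
+
+            output = np.round(X)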
+ + Examples: + :: + + round([0.9]) = [1.0] + round([2.5]) = [2.0] + round([2.3]) = [2.0] + round([1.5]) = [2.0] + round([-4.5]) = [-4.0] + + + + + Args: + X: (non-differentiable) Input tensor + """ + + schema = get_schema("Round", 11, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Round", schema) + return op(*self._prepare_inputs(schema, X)) + + def Scan( + self, + *initial_state_and_scan_inputs: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + body: Optional[GraphProto] = None, + num_scan_inputs: Optional[int] = None, + scan_input_axes: Optional[Sequence[int]] = None, + scan_input_directions: Optional[Sequence[int]] = None, + scan_output_axes: Optional[Sequence[int]] = None, + scan_output_directions: Optional[Sequence[int]] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Scan(11)](https://onnx.ai/onnx/operators/onnx__Scan.html#scan-11 "Online Documentation") + + + Scan can be used to iterate over one or more scan_input tensors, + constructing zero or more scan_output tensors. It combines ideas from general recurrences, + functional programming constructs such as scan, fold, map, and zip, and is intended to enable + generalizations of RNN-like constructs for sequence-to-sequence processing. + Other tensors (referred to as state_variables here) can be used to carry a state + when iterating from one element to another (similar to hidden-state in RNNs, also referred + to as loop-carried dependences in the context of loops). + Many common usages involve a single scan_input tensor (where functionality + similar to scan, fold and map can be obtained). When more than one scan_input is used, + a behavior similar to zip is obtained. + + The attribute body must be a graph, specifying the computation to be performed in + every iteration. It takes as input the current values of the state_variables and + the current iterated element of the scan_inputs. It must return the (updated) values + of the state_variables and zero or more scan_output_element tensors. The values of the + scan_output_element tensors are concatenated over all the iterations to produce the + scan_output values of the scan construct (similar to the concatenated intermediate + hidden-state values of RNN-like constructs). All the output tensors (state_variables as + well as scan_output_element tensors) are required to have the same shape in each iteration + of the loop (a restriction imposed to enable efficient memory allocation). + + Note that the iterated element passed to the body subgraph does not have a sequence + axis. It will have a rank one less than the rank of the corresponding scan_input. + + The scan operation returns the final values of the state_variables as well as the + scan_outputs. + + The optional attribute scan_input_directions specifies the direction (forward or backward) + for each scan input. If this attribute is omitted, all sequences are scanned in the forward + direction. A bidirectional scan may be performed by specifying the same tensor input twice + in the scan_inputs, once with a forward direction, and once with a backward direction. + + The scan_output of the operation is produced by concatenating the scan_output_element + values produced by the body in each iteration. 
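+
+        In functional terms, the single-scan-input case behaves like the following
+        sketch (illustrative only; `body` here stands for the subgraph):
+        ::
+
+            state = initial_state
+            scan_output_elts = []
+            for element in scan_input:      # iterate along the scan axis
+                state, out_elt = body(state, element)
+                scan_output_elts.append(out_elt)
+            # final results: state and np.stack(scan_output_elts)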
The optional attribute scan_output_directions
+ specifies the direction in which scan_output is constructed (by appending or prepending the
+ scan_output_element to scan_output in each iteration) for each scan_output. If this attribute
+ is omitted, the scan_output_element is appended to the scan_output in each iteration.
+
+ The optional attribute scan_input_axes specifies the axis to be scanned for each scan_input.
+ If omitted, every scan_input will be scanned in axis 0. For example, if axis 0 is the
+ batch axis and axis 1 is the time axis (to be scanned), specify an axis value of 1.
+ Note that scanning a non-zero axis may be less efficient than scanning axis zero.
+
+ The optional attribute scan_output_axes specifies the axis along which the scan_outputs
+ are accumulated for each scan_output. For example, if axis 1 is the time axis (to be
+ scanned) for both inputs and outputs, specify a scan_input axis and scan_output axis
+ value of 1.
+
+ Note that because of the ONNX restriction that only the last parameter of an operator can
+ be variadic, the initial-states and scan-inputs are listed together as one input parameter.
+ Similarly, the final-states and scan-outputs are listed together as one output parameter.
+ The attribute num_scan_inputs indicates the number M of scan-inputs.
+
+ The behavior of
+
+ Scan <
+ num_scan_inputs = m,
+ body = loop-body,
+ scan_input_axes = [axis_1, ..., axis_m]
+ > (init_1, ..., init_n, scan_1, ..., scan_m)
+
+ is equivalent to the following pseudo-code:
+
+ // scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i
+ // scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j.
+ sequence_length = scan_1.shape[axis_1];
+
+ // initialize state-variables
+ st_1 = init_1; ... st_n = init_n;
+ // initialize scan-output variables: [] denotes an empty tensor
+ scan_out_1 = []; ...; scan_out_k = [];
+ // identify number of iterations:
+
+ // execute loop
+ for (int t = 0; t < sequence_length; ++t) {
+ // generate the scan-input elements: the notation T[t] indicates the sub-tensor
+ // of rank one less than T obtained by indexing T at position t along axis k.
+ si_1 = scan_1[t];
+ ... ;
+ si_m = scan_m[t];
+ // execute loop-body
+ st_1, ..., st_n, so_1, ..., so_k = loop-body(st_1, ..., st_n, si_1, ..., si_m)
+ // accumulate the scan-output elements
+ scan_out_1 = Concat(scan_out_1, so_1); ... ; scan_out_k = Concat(scan_out_k, so_k);
+ }
+
+ return st_1, ..., st_n, scan_out_1, ..., scan_out_k;
+
+ *Sample usage: Encoding RNN using a Scan*
+
+ The following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi,
+ recurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can
+ be encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes
+ %Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these
+ values are computed in the outer graph, they need to be passed in as extra state_variables.
+
+ graph rnn-encoding {
+ %H_0 = ...
+ %X = ...
+ %Y_h, %Y = Scan[body = <graph rnn-cell-1>, num_scan_inputs=1](%H_0, %X)
+ return %Y, %Y_h
+ }
+
+ graph rnn-cell-1 (
+ %H_tminus1[FLOAT, tensor]
+ %X_t[FLOAT, tensor]
+ ) {
+ %Wi = ...
+ %Ri = ...
+ %Wbi = ...
+ %Rbi = ...
+ %t1 = X_t * (Wi^T) + %t2 = H_tminus1*(Ri^T) + %t3 = Add(%t1, %t2) + %t4 = Add(%t3, %Wbi) + %t5 = Add(%t4, %Rbi) + %Ht = Tanh(%t5) + %Accumulate = Identity(%Ht) + return %Ht, %Accumulate + } + + + + Args: + initial_state_and_scan_inputs: (variadic, heterogeneous) Initial values of + the loop's N state variables followed by M scan_inputs + + body: The graph run each iteration. It has N+M inputs: (loop state + variables..., scan_input_elts...). It has N+K outputs: (loop state + variables..., scan_output_elts...). Each scan_output is created by + concatenating the value of the specified scan_output_elt value at the + end of each iteration of the loop. It is an error if the dimensions of + these values change across loop iterations. + + num_scan_inputs: An attribute specifying the number of scan_inputs M. + + scan_input_axes: An optional list of M flags. The i-th element of the list + specifies the axis to be scanned (the sequence axis) for the i-th + scan_input. If omitted, 0 will be used as the scan axis for every + scan_input. Negative value for an axis means counting dimensions from + the back. Accepted range is [-r, r-1] where r = rank(input). + + scan_input_directions: An optional list of M flags. The i-th element of the + list specifies the direction to be scanned for the i-th scan_input + tensor: 0 indicates forward direction and 1 indicates reverse direction. + If omitted, all scan_input tensors will be scanned in the forward + direction. + + scan_output_axes: An optional list of K flags. The i-th element of the list + specifies the axis for the i-th scan_output. The scan outputs are + accumulated along the specified axis. If omitted, 0 will be used as the + scan axis for every scan_output. Negative value for an axis means + counting dimensions from the back. Accepted range is [-r, r-1]. + + scan_output_directions: An optional list of K flags, one for each + scan_output. The i-th element of the list specifies whether the i-th + scan_output should be constructed by appending or prepending a new value + in each iteration: 0 indicates appending and 1 indicates prepending. If + omitted, all scan_output tensors will be produced by appending a value + in each iteration. + """ + + schema = get_schema("Scan", 11, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Scan", schema) + return op( + *self._prepare_inputs(schema, *initial_state_and_scan_inputs), + body=body, + num_scan_inputs=num_scan_inputs, + scan_input_axes=scan_input_axes, + scan_input_directions=scan_input_directions, + scan_output_axes=scan_output_axes, + scan_output_directions=scan_output_directions, + ) + + def Scatter( + self, + data: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + indices: Union[INT32, INT64], + updates: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axis: int = 0, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Scatter(11)](https://onnx.ai/onnx/operators/onnx__Scatter.html#scatter-11 "Online Documentation") + + + This operator is deprecated. 
Please use ScatterElements, which provides the same functionality. + + Scatter takes three inputs `data`, `updates`, and `indices` of the same + rank r >= 1 and an optional attribute axis that identifies an axis of `data` + (by default, the outer-most axis, that is axis 0). The output of the operation + is produced by creating a copy of the input `data`, and then updating its value + to values specified by `updates` at specific index positions specified by + `indices`. Its output shape is the same as the shape of `data`. + + For each entry in `updates`, the target index in `data` is obtained by combining + the corresponding entry in `indices` with the index of the entry itself: the + index-value for dimension = axis is obtained from the value of the corresponding + entry in `indices` and the index-value for dimension != axis is obtained from the + index of the entry itself. + + For instance, in a 2-D tensor case, the update corresponding to the [i][j] entry + is performed as below: + :: + + output[indices[i][j]][j] = updates[i][j] if axis = 0, + output[i][indices[i][j]] = updates[i][j] if axis = 1, + + + + This operator is the inverse of GatherElements. It is similar to Torch's Scatter operation. + + Example 1: + :: + + data = [ + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + ] + indices = [ + [1, 0, 2], + [0, 2, 1], + ] + updates = [ + [1.0, 1.1, 1.2], + [2.0, 2.1, 2.2], + ] + output = [ + [2.0, 1.1, 0.0] + [1.0, 0.0, 2.2] + [0.0, 2.1, 1.2] + ] + + + Example 2: + :: + + data = [[1.0, 2.0, 3.0, 4.0, 5.0]] + indices = [[1, 3]] + updates = [[1.1, 2.1]] + axis = 1 + output = [[1.0, 1.1, 3.0, 2.1, 5.0]] + + + + + Args: + data: (differentiable) Tensor of rank r >= 1. + + indices: (non-differentiable) Tensor of int32/int64 indices, of r >= 1 (same + rank as input). All index values are expected to be within bounds [-s, + s-1] along axis of size s. It is an error if any of the index values are + out of bounds. + + updates: (differentiable) Tensor of rank r >=1 (same rank and shape as + indices) + + axis: Which axis to scatter on. Negative value means counting dimensions + from the back. Accepted range is [-r, r-1] where r = rank(data). + """ + + schema = get_schema("Scatter", 11, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Scatter", schema) + return op(*self._prepare_inputs(schema, data, indices, updates), axis=axis) + + def ScatterElements( + self, + data: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + indices: Union[INT32, INT64], + updates: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axis: int = 0, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 ScatterElements(11)](https://onnx.ai/onnx/operators/onnx__ScatterElements.html#scatterelements-11 "Online Documentation") + + + ScatterElements takes three inputs `data`, `updates`, and `indices` of the same + rank r >= 1 and an optional attribute axis that identifies an axis of `data` + (by default, the outer-most axis, that is axis 0). 
The output of the operation + is produced by creating a copy of the input `data`, and then updating its value + to values specified by `updates` at specific index positions specified by + `indices`. Its output shape is the same as the shape of `data`. + + For each entry in `updates`, the target index in `data` is obtained by combining + the corresponding entry in `indices` with the index of the entry itself: the + index-value for dimension = axis is obtained from the value of the corresponding + entry in `indices` and the index-value for dimension != axis is obtained from the + index of the entry itself. + + For instance, in a 2-D tensor case, the update corresponding to the [i][j] entry + is performed as below: + :: + + output[indices[i][j]][j] = updates[i][j] if axis = 0, + output[i][indices[i][j]] = updates[i][j] if axis = 1, + + + + This operator is the inverse of GatherElements. It is similar to Torch's Scatter operation. + + Example 1: + :: + + data = [ + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + ] + indices = [ + [1, 0, 2], + [0, 2, 1], + ] + updates = [ + [1.0, 1.1, 1.2], + [2.0, 2.1, 2.2], + ] + output = [ + [2.0, 1.1, 0.0] + [1.0, 0.0, 2.2] + [0.0, 2.1, 1.2] + ] + + + Example 2: + :: + + data = [[1.0, 2.0, 3.0, 4.0, 5.0]] + indices = [[1, 3]] + updates = [[1.1, 2.1]] + axis = 1 + output = [[1.0, 1.1, 3.0, 2.1, 5.0]] + + + + + Args: + data: Tensor of rank r >= 1. + + indices: Tensor of int32/int64 indices, of r >= 1 (same rank as input). All + index values are expected to be within bounds [-s, s-1] along axis of + size s. It is an error if any of the index values are out of bounds. + + updates: Tensor of rank r >=1 (same rank and shape as indices) + + axis: Which axis to scatter on. Negative value means counting dimensions + from the back. Accepted range is [-r, r-1] where r = rank(data). + """ + + schema = get_schema("ScatterElements", 11, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "ScatterElements", schema) + return op(*self._prepare_inputs(schema, data, indices, updates), axis=axis) + + def ScatterND( + self, + data: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + indices: INT64, + updates: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 ScatterND(11)](https://onnx.ai/onnx/operators/onnx__ScatterND.html#scatternd-11 "Online Documentation") + + + ScatterND takes three inputs `data` tensor of rank r >= 1, `indices` tensor of rank q >= 1, + and `updates` tensor of rank q + r - indices.shape[-1] - 1. The output of the operation + is produced by creating a copy of the input `data`, and then updating its value to values + specified by `updates` at specific index positions specified by `indices`. Its output shape + is the same as the shape of `data`. Note that `indices` should not have duplicate entries. + That is, two or more `updates` for the same index-location is not supported. + + `indices` is an integer tensor. Let k denote indices.shape[-1], the last dimension in the shape of `indices`. 
+ `indices` is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into `data`. + Hence, k can be a value at most the rank of `data`. When k equals rank(data), each update entry specifies an + update to a single element of the tensor. When k is less than rank(data) each update entry specifies an + update to a slice of the tensor. Index values are allowed to be negative, as per the usual + convention for counting backwards from the end, but are expected in the valid range. + + `updates` is treated as a (q-1)-dimensional tensor of replacement-slice-values. Thus, the + first (q-1) dimensions of updates.shape must match the first (q-1) dimensions of indices.shape. + The remaining dimensions of `updates` correspond to the dimensions of the + replacement-slice-values. Each replacement-slice-value is a (r-k) dimensional tensor, + corresponding to the trailing (r-k) dimensions of `data`. Thus, the shape of `updates` + must equal indices.shape[0:q-1] ++ data.shape[k:r-1], where ++ denotes the concatenation + of shapes. + + The `output` is calculated via the following equation: + + output = np.copy(data) + update_indices = indices.shape[:-1] + for idx in np.ndindex(update_indices): + output[indices[idx]] = updates[idx] + + The order of iteration in the above loop is not specified. + In particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2]. + This ensures that the output value does not depend on the iteration order. + + This operator is the inverse of GatherND. + + Example 1: + :: + + data = [1, 2, 3, 4, 5, 6, 7, 8] + indices = [[4], [3], [1], [7]] + updates = [9, 10, 11, 12] + output = [1, 11, 3, 10, 9, 6, 7, 12] + + + + Example 2: + :: + + data = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], + [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], + [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]], + [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]] + indices = [[0], [2]] + updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]] + output = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], + [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]], + [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]] + + + + + Args: + data: Tensor of rank r >= 1. + + indices: Tensor of rank q >= 1. + + updates: Tensor of rank q + r - indices_shape[-1] - 1. 
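+
+ As an illustration only (not part of the ONNX specification), a runnable
+ NumPy sketch of the equation above, applied to the data of Example 1; the
+ helper name is ours, purely for illustration:
+ ::
+
+     import numpy as np
+
+     def scatter_nd_reference(data, indices, updates):
+         # Copy the input, then write each update at the index-tuple it targets.
+         output = np.copy(data)
+         for idx in np.ndindex(indices.shape[:-1]):
+             output[tuple(indices[idx])] = updates[idx]
+         return output
+
+     data = np.array([1, 2, 3, 4, 5, 6, 7, 8])
+     indices = np.array([[4], [3], [1], [7]])
+     updates = np.array([9, 10, 11, 12])
+     scatter_nd_reference(data, indices, updates)  # [1, 11, 3, 10, 9, 6, 7, 12]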
+ """ + + schema = get_schema("ScatterND", 11, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "ScatterND", schema) + return op(*self._prepare_inputs(schema, data, indices, updates)) + + def SequenceAt( + self, + input_sequence: Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + ], + position: Union[INT32, INT64], + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 SequenceAt(11)](https://onnx.ai/onnx/operators/onnx__SequenceAt.html#sequenceat-11 "Online Documentation") + + + Outputs a tensor copy from the tensor at 'position' in 'input_sequence'. + Accepted range for 'position' is in `[-n, n - 1]`, where `n` is the number of tensors in 'input_sequence'. + Negative value means counting positions from the back. + + + Args: + input_sequence: Input sequence. + + position: Position of the tensor in the sequence. Negative value means + counting positions from the back. Accepted range in `[-n, n - 1]`, where + `n` is the number of tensors in 'input_sequence'. It is an error if any + of the index values are out of bounds. It must be a scalar(tensor of + empty shape). + """ + + schema = get_schema("SequenceAt", 11, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "SequenceAt", schema) + return op(*self._prepare_inputs(schema, input_sequence, position)) + + def SequenceConstruct( + self, + *inputs: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + ]: + r"""[🌐 SequenceConstruct(11)](https://onnx.ai/onnx/operators/onnx__SequenceConstruct.html#sequenceconstruct-11 "Online Documentation") + + + Construct a tensor sequence containing 'inputs' tensors. + All tensors in 'inputs' must have the same data type. + + + Args: + inputs: (variadic) Tensors. 
+ """ + + schema = get_schema("SequenceConstruct", 11, "") + op: Callable[ + ..., + Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + ], + ] = Op(self, "SequenceConstruct", schema) + return op(*self._prepare_inputs(schema, *inputs)) + + def SequenceEmpty( + self, dtype: Optional[int] = None + ) -> Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + ]: + r"""[🌐 SequenceEmpty(11)](https://onnx.ai/onnx/operators/onnx__SequenceEmpty.html#sequenceempty-11 "Online Documentation") + + + Construct an empty tensor sequence, with given data type. + + + Args: + dtype: (Optional) The data type of the tensors in the output sequence. The + default type is 'float'. + """ + + schema = get_schema("SequenceEmpty", 11, "") + op: Callable[ + ..., + Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + ], + ] = Op(self, "SequenceEmpty", schema) + return op(dtype=dtype) + + def SequenceErase( + self, + input_sequence: Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + ], + position: Optional[Union[INT32, INT64]] = None, + ) -> Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + ]: + r"""[🌐 SequenceErase(11)](https://onnx.ai/onnx/operators/onnx__SequenceErase.html#sequenceerase-11 "Online Documentation") + + + Outputs a tensor sequence that removes the tensor at 'position' from 'input_sequence'. + Accepted range for 'position' is in `[-n, n - 1]`, where `n` is the number of tensors in 'input_sequence'. + Negative value means counting positions from the back. + 'position' is optional, by default it erases the last tensor from 'input_sequence'. + + + Args: + input_sequence: Input sequence. + + position: (optional) Position of the tensor in the sequence. Negative value + means counting positions from the back. Accepted range in `[-n, n - 1]`, + where `n` is the number of tensors in 'input_sequence'. It is an error + if any of the index values are out of bounds. It must be a scalar(tensor + of empty shape). 
+ """ + + schema = get_schema("SequenceErase", 11, "") + op: Callable[ + ..., + Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + ], + ] = Op(self, "SequenceErase", schema) + return op(*self._prepare_inputs(schema, input_sequence, position)) + + def SequenceInsert( + self, + input_sequence: Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + ], + tensor: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + position: Optional[Union[INT32, INT64]] = None, + ) -> Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + ]: + r"""[🌐 SequenceInsert(11)](https://onnx.ai/onnx/operators/onnx__SequenceInsert.html#sequenceinsert-11 "Online Documentation") + + + Outputs a tensor sequence that inserts 'tensor' into 'input_sequence' at 'position'. + 'tensor' must have the same data type as 'input_sequence'. + Accepted range for 'position' is in `[-n, n]`, where `n` is the number of tensors in 'input_sequence'. + Negative value means counting positions from the back. + 'position' is optional, by default it inserts 'tensor' to the back of 'input_sequence'. + + + Args: + input_sequence: Input sequence. + + tensor: Input tensor to be inserted into the input sequence. + + position: (optional) Position in the sequence where the new tensor is + inserted. It is optional and default is to insert to the back of the + sequence. Negative value means counting positions from the back. + Accepted range in `[-n, n]`, where `n` is the number of tensors in + 'input_sequence'. It is an error if any of the index values are out of + bounds. It must be a scalar(tensor of empty shape). 
+ """ + + schema = get_schema("SequenceInsert", 11, "") + op: Callable[ + ..., + Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + ], + ] = Op(self, "SequenceInsert", schema) + return op(*self._prepare_inputs(schema, input_sequence, tensor, position)) + + def SequenceLength( + self, + input_sequence: Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + ], + ) -> INT64: + r"""[🌐 SequenceLength(11)](https://onnx.ai/onnx/operators/onnx__SequenceLength.html#sequencelength-11 "Online Documentation") + + + Produces a scalar(tensor of empty shape) containing the number of tensors in 'input_sequence'. + + + Args: + input_sequence: Input sequence. + """ + + schema = get_schema("SequenceLength", 11, "") + op: Callable[..., INT64] = Op(self, "SequenceLength", schema) + return op(*self._prepare_inputs(schema, input_sequence)) + + def Slice( + self, + data: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + starts: Union[INT32, INT64], + ends: Union[INT32, INT64], + axes: Optional[Union[INT32, INT64]] = None, + steps: Optional[Union[INT32, INT64]] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Slice(11)](https://onnx.ai/onnx/operators/onnx__Slice.html#slice-11 "Online Documentation") + + + Produces a slice of the input tensor along multiple axes. Similar to numpy: + https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html + Slices uses `starts`, `ends`, `axes` and `steps` inputs to specify the start and end + dimension and step for each axis in the list of axes, it uses this information to + slice the input `data` tensor. If a negative value is passed for any of the + start or end indices, it represents number of elements before the end of that + dimension. If the value passed to start or end is larger than the `n` (the + number of elements in this dimension), it represents `n`. For slicing to the + end of a dimension with unknown size, it is recommended to pass in `INT_MAX` + when slicing forward and 'INT_MIN' when slicing backward. + If a negative value is passed for step, it represents slicing backward. + However step value cannot be 0. + If `axes` are omitted, they are set to `[0, ..., ndim-1]`. + If `steps` are omitted, they are set to `[1, ..., 1]` of length `len(starts)` + Example 1: + data = [ + [1, 2, 3, 4], + [5, 6, 7, 8], + ] + axes = [0, 1] + starts = [1, 0] + ends = [2, 3] + steps = [1, 2] + result = [ + [5, 7], + ] + Example 2: + data = [ + [1, 2, 3, 4], + [5, 6, 7, 8], + ] + starts = [0, 1] + ends = [-1, 1000] + result = [ + [2, 3, 4], + ] + + + Args: + data: Tensor of data to extract slices from. + + starts: 1-D tensor of starting indices of corresponding axis in `axes` + + ends: 1-D tensor of ending indices (exclusive) of corresponding axis in + `axes` + + axes: (optional) 1-D tensor of axes that `starts` and `ends` apply to. 
+ Negative value means counting dimensions from the back. Accepted range + is [-r, r-1] where r = rank(data). + + steps: (optional) 1-D tensor of slice step of corresponding axis in `axes`. + Negative value means slicing backward. 'steps' cannot be 0. Defaults to + 1. + """ + + schema = get_schema("Slice", 11, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Slice", schema) + return op(*self._prepare_inputs(schema, data, starts, ends, axes, steps)) + + def Softmax( + self, input: Union[DOUBLE, FLOAT, FLOAT16], axis: int = 1 + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Softmax(11)](https://onnx.ai/onnx/operators/onnx__Softmax.html#softmax-11 "Online Documentation") + + + The operator computes the softmax (normalized exponential) values for each layer in the batch + of the given input. + + The input does not need to explicitly be a 2D vector; rather, it will be + coerced into one. For an arbitrary n-dimensional tensor + input \in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}] and k is + the axis provided, then input will be coerced into a 2-dimensional tensor with + dimensions [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]. For the default + case where axis=1, this means the input tensor will be coerced into a 2D tensor + of dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size. + In this situation, we must have a_0 = N and a_1 * ... * a_{n-1} = D. + Each of these dimensions must be matched correctly, or else the operator + will throw errors. The output tensor has the same shape + and contains the softmax values of the corresponding input. + + + Args: + input: The input tensor that's coerced into a 2D matrix of size (NxD) as + described above. + + axis: Describes the axis of the inputs when coerced to 2D; defaults to one + because the 0th axis most likely describes the batch_size. Negative + value means counting dimensions from the back. Accepted range is [-r, + r-1] where r = rank(input). + """ + + schema = get_schema("Softmax", 11, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Softmax", schema) + return op(*self._prepare_inputs(schema, input), axis=axis) + + def Split( + self, + input: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axis: int = 0, + split: Optional[Sequence[int]] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Split(11)](https://onnx.ai/onnx/operators/onnx__Split.html#split-11 "Online Documentation") + + Split a tensor into a list of tensors, along the specified + 'axis'. Lengths of the parts can be specified using argument 'split'. + Otherwise, the tensor is split to equal sized parts. + + + Args: + input: The tensor to split + + axis: Which axis to split on. A negative value means counting dimensions + from the back. Accepted range is [-rank, rank-1] where r = rank(input). + + split: length of each output. Values should be >= 0. 
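+
+ For intuition only, numpy.split mirrors both behaviours described above,
+ equal parts when 'split' is absent and explicit lengths otherwise:
+ ::
+
+     import numpy as np
+
+     x = np.arange(6)
+     np.split(x, 3)                          # three equal parts of length 2
+     np.split(x, np.cumsum([1, 2, 3])[:-1])  # parts of length 1, 2 and 3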
+ """ + + schema = get_schema("Split", 11, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Split", schema) + return op(*self._prepare_inputs(schema, input), axis=axis, split=split) + + def SplitToSequence( + self, + input: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + split: Optional[Union[INT32, INT64]] = None, + axis: int = 0, + keepdims: int = 1, + ) -> Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + ]: + r"""[🌐 SplitToSequence(11)](https://onnx.ai/onnx/operators/onnx__SplitToSequence.html#splittosequence-11 "Online Documentation") + + Split a tensor into a sequence of tensors, along the specified + 'axis'. Lengths of the parts can be specified using argument 'split'. + 'split' must contain only positive numbers. + 'split' is either a scalar (tensor of empty shape), or a 1-D tensor. + If 'split' is a scalar, then 'input' will be split into equally sized chunks(if possible). + Last chunk will be smaller if the 'input' size along the given axis 'axis' is not divisible + by 'split'. + Otherwise, the tensor is split into 'size(split)' chunks, with lengths of the parts on 'axis' + specified in 'split'. In this scenario, the sum of entries in 'split' must be equal to the + dimension size of input tensor on 'axis'. + + + Args: + input: The tensor to split + + split: (optional) Length of each output. It can be either a scalar(tensor of + empty shape), or a 1-D tensor. All values must be >= 0. + + axis: Which axis to split on. A negative value means counting dimensions + from the back. Accepted range is [-rank, rank-1]. + + keepdims: Keep the split dimension or not. Default 1, which means we keep + split dimension. If input 'split' is specified, this attribute is + ignored. + """ + + schema = get_schema("SplitToSequence", 11, "") + op: Callable[ + ..., + Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + ], + ] = Op(self, "SplitToSequence", schema) + return op(*self._prepare_inputs(schema, input, split), axis=axis, keepdims=keepdims) + + def Squeeze( + self, + data: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axes: Optional[Sequence[int]] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Squeeze(11)](https://onnx.ai/onnx/operators/onnx__Squeeze.html#squeeze-11 "Online Documentation") + + + Remove single-dimensional entries from the shape of a tensor. + Takes a parameter `axes` with a list of axes to squeeze. + If `axes` is not provided, all the single dimensions will be removed from + the shape. 
If an axis is selected with shape entry not equal to one, an error is raised. + + + Args: + data: Tensors with at least max(dims) dimensions. + + axes: List of integers indicating the dimensions to squeeze. Negative value + means counting dimensions from the back. Accepted range is [-r, r-1] + where r = rank(data). + """ + + schema = get_schema("Squeeze", 11, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Squeeze", schema) + return op(*self._prepare_inputs(schema, data), axes=axes) + + def TopK( + self, + X: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + K: INT64, + axis: int = -1, + largest: int = 1, + sorted: int = 1, + ) -> Tuple[ + Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + INT64, + ]: + r"""[🌐 TopK(11)](https://onnx.ai/onnx/operators/onnx__TopK.html#topk-11 "Online Documentation") + + + Retrieve the top-K largest or smallest elements along a specified axis. Given an input tensor of + shape [a_1, a_2, ..., a_n, r] and integer argument k, return two outputs: + -Value tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] + which contains the values of the top k elements along the specified axis + -Index tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] which + contains the indices of the top k elements (original indices from the input + tensor). + + If "largest" is 1 (the default value) then the k largest elements are returned. + If "sorted" is 1 (the default value) then the resulting k elements will be sorted. + If "sorted" is 0, order of returned 'Values' and 'Indices' are undefined. + + Given two equivalent values, this operator uses the indices along the axis as + a tiebreaker. That is, the element with the lower index will appear first. + + + Args: + X: (differentiable) Tensor of shape [a_1, a_2, ..., a_n, r] + + K: (non-differentiable) A 1-D tensor containing a single positive value + corresponding to the number of top elements to retrieve + + axis: Dimension on which to do the sort. Negative value means counting + dimensions from the back. Accepted range is [-r, r-1] where r = + rank(input). + + largest: Whether to return the top-K largest or smallest elements. + + sorted: Whether to return the elements in sorted order. + """ + + schema = get_schema("TopK", 11, "") + op: Callable[ + ..., + Tuple[ + Union[ + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + INT64, + ], + ] = Op(self, "TopK", schema) + return op( + *self._prepare_inputs(schema, X, K), axis=axis, largest=largest, sorted=sorted + ) + + def Unique( + self, + X: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axis: Optional[int] = None, + sorted: int = 1, + ) -> Tuple[ + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + INT64, + INT64, + INT64, + ]: + r"""[🌐 Unique(11)](https://onnx.ai/onnx/operators/onnx__Unique.html#unique-11 "Online Documentation") + + + Find the unique elements of a tensor. When an optional attribute 'axis' is provided, unique subtensors sliced along the 'axis' are returned. 
+ Otherwise the input tensor is flattened and unique values of the flattened tensor are returned.
+
+ This operator returns the unique values or sliced unique subtensors of the input tensor and three optional outputs.
+ The first output tensor 'Y' contains all unique values or subtensors of the input.
+ The second optional output tensor 'indices' contains indices of 'Y' elements' first occurrence in 'X'.
+ The third optional output tensor 'inverse_indices' contains, for each element of 'X', its corresponding index in 'Y'.
+ The fourth optional output tensor 'counts' contains the count of each element of 'Y' in the input.
+
+ Outputs are either sorted in ascending order or optionally in the order of the first occurrence of the values in the input.
+
+ https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html
+
+ Example 1:
+ input_X = [2, 1, 1, 3, 4, 3]
+ attribute_sorted = 0
+ attribute_axis = None
+ output_Y = [2, 1, 3, 4]
+ output_indices = [0, 1, 3, 4]
+ output_inverse_indices = [0, 1, 1, 2, 3, 2]
+ output_counts = [1, 2, 2, 1]
+
+ Example 2:
+ input_X = [[1, 3], [2, 3]]
+ attribute_sorted = 1
+ attribute_axis = None
+ output_Y = [1, 2, 3]
+ output_indices = [0, 2, 1]
+ output_inverse_indices = [0, 2, 1, 2]
+ output_counts = [1, 1, 2]
+
+ Example 3:
+ input_X = [[1, 0, 0], [1, 0, 0], [2, 3, 4]]
+ attribute_sorted = 1
+ attribute_axis = 0
+ output_Y = [[1, 0, 0], [2, 3, 4]]
+ output_indices = [0, 2]
+ output_inverse_indices = [0, 0, 1]
+ output_counts = [2, 1]
+
+ Example 4:
+ input_x = [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]],
+ [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]]
+ attribute_sorted = 1
+ attribute_axis = 1
+
+ intermediate data are presented below for better understanding:
+
+ there are 4 subtensors sliced along axis 1 of input_x (shape = (2, 4, 2)):
+ A: [[1, 1], [1, 1]],
+ [[0, 1], [0, 1]],
+ [[2, 1], [2, 1]],
+ [[0, 1], [0, 1]].
+
+ there are 3 unique subtensors:
+ [[1, 1], [1, 1]],
+ [[0, 1], [0, 1]],
+ [[2, 1], [2, 1]].
+
+ sorted unique subtensors:
+ B: [[0, 1], [0, 1]],
+ [[1, 1], [1, 1]],
+ [[2, 1], [2, 1]].
+
+ output_Y is constructed from B:
+ [[[0. 1.], [1. 1.], [2. 1.]],
+ [[0. 1.], [1. 1.], [2. 1.]]]
+
+ output_indices is to map from B to A:
+ [1, 0, 2]
+
+ output_inverse_indices is to map from A to B:
+ [1, 0, 2, 0]
+
+ output_counts = [2 1 1]
+
+
+ Args:
+ X: (non-differentiable) An N-D input tensor that is to be processed.
+
+ axis: (Optional) The dimension to apply unique. If not specified, the unique
+ elements of the flattened input are returned. Negative value means
+ counting dimensions from the back. Accepted range is [-r, r-1] where r =
+ rank(input).
+
+ sorted: (Optional) Whether to sort the unique elements in ascending order
+ before returning as output. Must be one of 0 or 1 (default).
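+
+ For comparison only, numpy.unique (linked above) computes the same four
+ outputs for the flattened, sorted case (i.e. sorted=1 and no axis):
+ ::
+
+     import numpy as np
+
+     x = np.array([2, 1, 1, 3, 4, 3])
+     y, indices, inverse_indices, counts = np.unique(
+         x, return_index=True, return_inverse=True, return_counts=True
+     )
+     # y = [1 2 3 4], indices = [1 0 3 4],
+     # inverse_indices = [1 0 0 2 3 2], counts = [2 1 1 1]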
+ """ + + schema = get_schema("Unique", 11, "") + op: Callable[ + ..., + Tuple[ + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + INT64, + INT64, + INT64, + ], + ] = Op(self, "Unique", schema) + return op(*self._prepare_inputs(schema, X), axis=axis, sorted=sorted) + + def Unsqueeze( + self, + data: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axes: Optional[Sequence[int]] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Unsqueeze(11)](https://onnx.ai/onnx/operators/onnx__Unsqueeze.html#unsqueeze-11 "Online Documentation") + + + Insert single-dimensional entries to the shape of an input tensor (`data`). + Takes one required argument `axes` - which contains a list of dimension indices and this operator will insert a dimension of value `1` into the corresponding index of the output tensor (`expanded`). + + For example: + Given an input tensor (`data`) of shape [3, 4, 5], then + Unsqueeze(data, axes=[0, 4]) outputs a tensor (`expanded`) containing same data as `data` but with shape [1, 3, 4, 5, 1]. + + The attribute `axes` should not contain any duplicate entries. It is an error if it contains duplicates. + The rank of the output tensor (`output_rank`) is the rank of the input tensor (`data`) plus the number of values in `axes`. + Each value in `axes` should be within the (inclusive) range [-output_rank , output_rank - 1]. + The order of values in `axes` does not matter and can come in any order. + + + + Args: + data: Original tensor + + axes: List of integers indicating the dimensions to be inserted. Negative + value means counting dimensions from the back. Accepted range is [-r, + r-1] where r = rank(expanded). + """ + + schema = get_schema("Unsqueeze", 11, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Unsqueeze", schema) + return op(*self._prepare_inputs(schema, data), axes=axes) diff --git a/onnxscript/onnx_opset/_impl/opset12.py b/onnxscript/onnx_opset/_impl/opset12.py new file mode 100644 index 0000000000..591dafeaa7 --- /dev/null +++ b/onnxscript/onnx_opset/_impl/opset12.py @@ -0,0 +1,1120 @@ +# -------------------------------------------------------------------------- +# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ +# ⚙️ Generated by 'python -m opgen' +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+# --------------------------------------------------------------------------
+# flake8: noqa
+# mypy: disable-error-code=override
+# pylint: disable=W0221,W0222,W0237,W0246,R0901
+# --------------------------------------------------------------------------
+
+from typing import Callable, Optional, Sequence, Tuple, Union
+
+from onnx import SparseTensorProto, TensorProto
+from onnx.defs import get_schema
+
+from onnxscript.onnx_opset._impl.opset11 import Opset11
+from onnxscript.onnx_types import (
+ BOOL,
+ COMPLEX64,
+ COMPLEX128,
+ DOUBLE,
+ FLOAT,
+ FLOAT16,
+ INT8,
+ INT16,
+ INT32,
+ INT64,
+ STRING,
+ UINT8,
+ UINT16,
+ UINT32,
+ UINT64,
+)
+from onnxscript.values import Op, Opset
+
+
+class Opset12(Opset11):
+ def __new__(cls):
+ return Opset.__new__(cls, "", 12)
+
+ def __init__(self):
+ super().__init__()
+
+ def ArgMax(
+ self,
+ data: Union[
+ DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8
+ ],
+ axis: int = 0,
+ keepdims: int = 1,
+ select_last_index: int = 0,
+ ) -> INT64:
+ r"""[🌐 ArgMax(12)](https://onnx.ai/onnx/operators/onnx__ArgMax.html#argmax-12 "Online Documentation")
+
+
+ Computes the indices of the max elements of the input tensor along the
+ provided axis. The resulting tensor has the same rank as the input if keepdims equals 1.
+ If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.
+ If select_last_index is True (default False), the index of the last occurrence of the max
+ is selected if the max appears more than once in the input. Otherwise the index of the
+ first occurrence is selected.
+ The type of the output tensor is integer.
+
+ Args:
+ data: An input tensor.
+
+ axis: The axis in which to compute the arg indices. Accepted range is [-r,
+ r-1] where r = rank(data).
+
+ keepdims: Keep the reduced dimension or not, default 1 means keep reduced
+ dimension.
+
+ select_last_index: Whether to select the last index or the first index if
+ the max appears at multiple positions, default is False (first index).
+ """
+
+ schema = get_schema("ArgMax", 12, "")
+ op: Callable[..., INT64] = Op(self, "ArgMax", schema)
+ return op(
+ *self._prepare_inputs(schema, data),
+ axis=axis,
+ keepdims=keepdims,
+ select_last_index=select_last_index,
+ )
+
+ def ArgMin(
+ self,
+ data: Union[
+ DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8
+ ],
+ axis: int = 0,
+ keepdims: int = 1,
+ select_last_index: int = 0,
+ ) -> INT64:
+ r"""[🌐 ArgMin(12)](https://onnx.ai/onnx/operators/onnx__ArgMin.html#argmin-12 "Online Documentation")
+
+
+ Computes the indices of the min elements of the input tensor along the
+ provided axis. The resulting tensor has the same rank as the input if keepdims equals 1.
+ If keepdims equals 0, then the resulting tensor has the reduced dimension pruned.
+ If select_last_index is True (default False), the index of the last occurrence of the min
+ is selected if the min appears more than once in the input. Otherwise the index of the
+ first occurrence is selected.
+ The type of the output tensor is integer.
+
+ Args:
+ data: An input tensor.
+
+ axis: The axis in which to compute the arg indices. Accepted range is [-r,
+ r-1] where r = rank(data).
+
+ keepdims: Keep the reduced dimension or not, default 1 means keep reduced
+ dimension.
+
+ select_last_index: Whether to select the last index or the first index if
+ the min appears at multiple positions, default is False (first index).
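+
+ Since NumPy has no select_last_index switch, one illustrative way to emulate
+ both settings (a sketch only, not the operator's implementation):
+ ::
+
+     import numpy as np
+
+     x = np.array([3, 1, 1, 2])
+     np.argmin(x)                     # 1: first occurrence of the minimum
+     x.size - 1 - np.argmin(x[::-1])  # 2: last occurrence of the minimum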
+ """ + + schema = get_schema("ArgMin", 12, "") + op: Callable[..., INT64] = Op(self, "ArgMin", schema) + return op( + *self._prepare_inputs(schema, data), + axis=axis, + keepdims=keepdims, + select_last_index=select_last_index, + ) + + def Celu(self, X: FLOAT, alpha: float = 1.0) -> FLOAT: + r"""[🌐 Celu(12)](https://onnx.ai/onnx/operators/onnx__Celu.html#celu-12 "Online Documentation") + + + Continuously Differentiable Exponential Linear Units: + Perform the linear unit element-wise on the input tensor X + using formula: + + :: + + max(0,x) + min(0,alpha*(exp(x/alpha)-1)) + + + + + Args: + X: (differentiable) Input tensor + + alpha: The Alpha value in Celu formula which control the shape of the unit. + The default value is 1.0. + """ + + schema = get_schema("Celu", 12, "") + op: Callable[..., FLOAT] = Op(self, "Celu", schema) + return op(*self._prepare_inputs(schema, X), alpha=alpha) + + def Clip( + self, + input: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + min: Optional[ + Union[ + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ] + ] = None, + max: Optional[ + Union[ + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ] + ] = None, + ) -> Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ]: + r"""[🌐 Clip(12)](https://onnx.ai/onnx/operators/onnx__Clip.html#clip-12 "Online Documentation") + + + Clip operator limits the given input within an interval. The interval is + specified by the inputs 'min' and 'max'. They default to + numeric_limits::lowest() and numeric_limits::max(), respectively. + + + Args: + input: Input tensor whose elements to be clipped + + min: (optional) Minimum value, under which element is replaced by min. It + must be a scalar(tensor of empty shape). + + max: (optional) Maximum value, above which element is replaced by max. It + must be a scalar(tensor of empty shape). + """ + + schema = get_schema("Clip", 12, "") + op: Callable[ + ..., + Union[ + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Clip", schema) + return op(*self._prepare_inputs(schema, input, min, max)) + + def Constant( + self, + sparse_value: Optional[SparseTensorProto] = None, + value: Optional[TensorProto] = None, + value_float: Optional[float] = None, + value_floats: Optional[Sequence[float]] = None, + value_int: Optional[int] = None, + value_ints: Optional[Sequence[int]] = None, + value_string: Optional[str] = None, + value_strings: Optional[Sequence[str]] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Constant(12)](https://onnx.ai/onnx/operators/onnx__Constant.html#constant-12 "Online Documentation") + + + This operator produces a constant tensor. Exactly one of the provided attributes, either value, sparse_value, + or value_* must be specified. + + + Args: + sparse_value: The value for the elements of the output tensor in sparse + format. + + value: The value for the elements of the output tensor. + + value_float: The value for the sole element for the scalar, float32, output + tensor. + + value_floats: The values for the elements for the 1D, float32, output + tensor. + + value_int: The value for the sole element for the scalar, int64, output + tensor. 
+ + value_ints: The values for the elements for the 1D, int64, output tensor. + + value_string: The value for the sole element for the scalar, UTF-8 string, + output tensor. + + value_strings: The values for the elements for the 1D, UTF-8 string, output + tensor. + """ + + schema = get_schema("Constant", 12, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Constant", schema) + return op( + sparse_value=sparse_value, + value=value, + value_float=value_float, + value_floats=value_floats, + value_int=value_int, + value_ints=value_ints, + value_string=value_string, + value_strings=value_strings, + ) + + def Dropout( + self, + data: Union[DOUBLE, FLOAT, FLOAT16], + ratio: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + training_mode: Optional[BOOL] = None, + seed: Optional[int] = None, + ) -> Tuple[Union[DOUBLE, FLOAT, FLOAT16], BOOL]: + r"""[🌐 Dropout(12)](https://onnx.ai/onnx/operators/onnx__Dropout.html#dropout-12 "Online Documentation") + + + Dropout takes an input floating-point tensor, an optional input ratio (floating-point scalar) and an optional input training_mode (boolean scalar). It produces two tensor outputs, + output (floating-point tensor) and mask (optional `Tensor`). If `training_mode` is true then the output Y will be a random dropout; + Note that this Dropout scales the masked input data by the following equation, so to convert the trained model into inference mode, + the user can simply not pass `training_mode` input or set it to false. + :: + + output = scale * data * mask, + + + where + :: + + scale = 1. / (1. - ratio). + + + This operator has **optional** inputs/outputs. See `ONNX `_ for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. + + + Args: + data: The input data as Tensor. + + ratio: (optional) The ratio of random dropout, with value in [0, 1). If this + input was not set, or if it was set to 0, the output would be a simple + copy of the input. If it's non-zero, output will be a random dropout of + the scaled input, which is typically the case during training. It is an + optional value, if not specified it will default to 0.5. + + training_mode: (optional) If set to true then it indicates dropout is being + used for training. It is an optional value hence unless specified + explicitly, it is false. If it is false, ratio is ignored and the + operation mimics inference mode where nothing will be dropped from the + input data and if mask is requested as output it will contain all ones. + + seed: (Optional) Seed to the random generator, if not specified we will auto + generate one. 
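+
+ As an illustration only, a NumPy sketch of the training-mode equation given
+ above (the runtime's random generator and seeding behaviour will differ;
+ the helper name is ours):
+ ::
+
+     import numpy as np
+
+     def dropout_reference(data, ratio=0.5, training_mode=False, seed=None):
+         if not training_mode or ratio == 0:
+             return data, np.ones(data.shape, dtype=bool)  # inference: identity
+         rng = np.random.default_rng(seed)
+         mask = rng.random(data.shape) >= ratio  # keep with probability 1 - ratio
+         return (1.0 / (1.0 - ratio)) * data * mask, mask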
+ """ + + schema = get_schema("Dropout", 12, "") + op: Callable[..., Tuple[Union[DOUBLE, FLOAT, FLOAT16], BOOL]] = Op( + self, "Dropout", schema + ) + return op(*self._prepare_inputs(schema, data, ratio, training_mode), seed=seed) + + def Einsum( + self, + *Inputs: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + equation: Optional[str] = None, + ) -> Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ]: + r"""[🌐 Einsum(12)](https://onnx.ai/onnx/operators/onnx__Einsum.html#einsum-12 "Online Documentation") + + + An einsum of the form ```term1, term2 -> output-term``` produces an output tensor using the following equation + + :: + + + where the reduce-sum performs a summation over all the indices occurring in the input terms (term1, term2) + that do not occur in the output-term. + + The Einsum operator evaluates algebraic tensor operations on a sequence of tensors, using the Einstein summation + convention. The equation string contains a comma-separated sequence of lower case letters. Each term corresponds to + an operand tensor, and the characters within the terms correspond to operands dimensions. + + This sequence may be followed by "->" to separate the left and right hand side of the equation. + If the equation contains "->" followed by the right-hand side, the explicit (not classical) form of the Einstein + summation is performed, and the right-hand side indices indicate output tensor dimensions. In other cases, + output indices are (implicitly) set to the alphabetically sorted sequence of indices appearing exactly once in the + equation. + + When a dimension character is repeated in the left-hand side, it represents summation along the dimension. + + The equation may contain ellipsis ("...") to enable broadcasting. Ellipsis must indicate a fixed number of dimensions. + Specifically, every occurrence of ellipsis in the equation must represent the same number of dimensions. + The right-hand side may contain exactly one ellipsis. In implicit mode, the ellipsis dimensions are set to the + beginning of the output. The equation string may contain space (U+0020) character. + + + Args: + Inputs: (variadic, differentiable) Operands + + equation: Einsum expression string. + """ + + schema = get_schema("Einsum", 12, "") + op: Callable[ + ..., + Union[ + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Einsum", schema) + return op(*self._prepare_inputs(schema, *Inputs), equation=equation) + + def GatherND( + self, + data: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + indices: INT64, + batch_dims: int = 0, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 GatherND(12)](https://onnx.ai/onnx/operators/onnx__GatherND.html#gathernd-12 "Online Documentation") + + + Given `data` tensor of rank `r` >= 1, `indices` tensor of rank `q` >= 1, and `batch_dims` integer `b`, this operator gathers + slices of `data` into an output tensor of rank `q + r - indices_shape[-1] - 1 - b`. 
+ + `indices` is an q-dimensional integer tensor, best thought of as a `(q-1)`-dimensional tensor of index-tuples into `data`, + where each element defines a slice of `data` + + `batch_dims` (denoted as `b`) is an integer indicating the number of batch dimensions, i.e the leading `b` number of dimensions of + `data` tensor and `indices` are representing the batches, and the gather starts from the `b+1` dimension. + + Some salient points about the inputs' rank and shape: + + 1) r >= 1 and q >= 1 are to be honored. There is no dependency condition to be met between ranks `r` and `q` + + 2) The first `b` dimensions of the shape of `indices` tensor and `data` tensor must be equal. + + 3) b < min(q, r) is to be honored. + + 4) The `indices_shape[-1]` should have a value between 1 (inclusive) and rank `r-b` (inclusive) + + 5) All values in `indices` are expected to be within bounds [-s, s-1] along axis of size `s` (i.e.) `-data_shape[i] <= indices[...,i] <= data_shape[i] - 1`. + It is an error if any of the index values are out of bounds. + + The output is computed as follows: + + The output tensor is obtained by mapping each index-tuple in the `indices` tensor to the corresponding slice of the input `data`. + + 1) If `indices_shape[-1] > r-b` => error condition + + 2) If `indices_shape[-1] == r-b`, since the rank of `indices` is `q`, `indices` can be thought of as `N` `(q-b-1)`-dimensional tensors + containing 1-D tensors of dimension `r-b`, where `N` is an integer equals to the product of 1 and all the elements in the batch dimensions + of the indices_shape. Let us think of each such `r-b` ranked tensor as `indices_slice`. Each *scalar value* corresponding to `data[0:b-1,indices_slice]` + is filled into the corresponding location of the `(q-b-1)`-dimensional tensor to form the `output` tensor (Example 1 below) + + 3) If `indices_shape[-1] < r-b`, since the rank of `indices` is `q`, `indices` can be thought of as `N` `(q-b-1)`-dimensional tensor + containing 1-D tensors of dimension `< r-b`. Let us think of each such tensors as `indices_slice`. Each *tensor slice* corresponding + to `data[0:b-1, indices_slice , :]` is filled into the corresponding location of the `(q-b-1)`-dimensional tensor + to form the `output` tensor (Examples 2, 3, 4 and 5 below) + + This operator is the inverse of `ScatterND`. + + `Example 1` + + batch_dims = 0 + + data = [[0,1],[2,3]] # data_shape = [2, 2] + + indices = [[0,0],[1,1]] # indices_shape = [2, 2] + + output = [0,3] # output_shape = [2] + + `Example 2` + + batch_dims = 0 + + data = [[0,1],[2,3]] # data_shape = [2, 2] + + indices = [[1],[0]] # indices_shape = [2, 1] + + output = [[2,3],[0,1]] # output_shape = [2, 2] + + `Example 3` + + batch_dims = 0 + + data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2] + + indices = [[0,1],[1,0]] # indices_shape = [2, 2] + + output = [[2,3],[4,5]] # output_shape = [2, 2] + + `Example 4` + + batch_dims = 0 + + data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2] + + indices = [[[0,1]],[[1,0]]] # indices_shape = [2, 1, 2] + + output = [[[2,3]],[[4,5]]] # output_shape = [2, 1, 2] + + `Example 5` + + batch_dims = 1 + + data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2] + + indices = [[1],[0]] # indices_shape = [2, 1] + + output = [[2,3],[4,5]] # output_shape = [2, 2] + + + + + Args: + data: Tensor of rank r >= 1. + + indices: Tensor of rank q >= 1. All index values are expected to be within + bounds [-s, s-1] along axis of size s. It is an error if any of the + index values are out of bounds. 
+ + batch_dims: The number of batch dimensions. The gather of indexing starts + from dimension of data[batch_dims:] + """ + + schema = get_schema("GatherND", 12, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "GatherND", schema) + return op(*self._prepare_inputs(schema, data, indices), batch_dims=batch_dims) + + def GreaterOrEqual( + self, + A: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + B: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + ) -> BOOL: + r"""[🌐 GreaterOrEqual(12)](https://onnx.ai/onnx/operators/onnx__GreaterOrEqual.html#greaterorequal-12 "Online Documentation") + + + Returns the tensor resulted from performing the `greater_equal` logical operation + elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: (non-differentiable) First input operand for the logical operator. + + B: (non-differentiable) Second input operand for the logical operator. + """ + + schema = get_schema("GreaterOrEqual", 12, "") + op: Callable[..., BOOL] = Op(self, "GreaterOrEqual", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def LessOrEqual( + self, + A: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + B: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + ) -> BOOL: + r"""[🌐 LessOrEqual(12)](https://onnx.ai/onnx/operators/onnx__LessOrEqual.html#lessorequal-12 "Online Documentation") + + + Returns the tensor resulted from performing the `less_equal` logical operation + elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: (non-differentiable) First input operand for the logical operator. + + B: (non-differentiable) Second input operand for the logical operator. + """ + + schema = get_schema("LessOrEqual", 12, "") + op: Callable[..., BOOL] = Op(self, "LessOrEqual", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def Max( + self, + *data_0: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + ) -> Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ]: + r"""[🌐 Max(12)](https://onnx.ai/onnx/operators/onnx__Max.html#max-12 "Online Documentation") + + + Element-wise max of each of the input tensors (with Numpy-style broadcasting support). + All inputs and outputs must have the same data type. + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + data_0: (variadic) List of tensors for max. 
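+
+ A small numpy analogue of the variadic, broadcast-then-max behavior:
+ ::
+
+ import numpy as np
+
+ a = np.array([[1.0, 2.0], [3.0, 4.0]])
+ b = np.array([10.0, 0.0])  # broadcasts against each row of a
+ c = np.array(2.5)  # scalar broadcasts everywhere
+ result = np.maximum(np.maximum(a, b), c)  # [[10., 2.5], [10., 4.]]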
+ """ + + schema = get_schema("Max", 12, "") + op: Callable[ + ..., + Union[ + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Max", schema) + return op(*self._prepare_inputs(schema, *data_0)) + + def MaxPool( + self, + X: Union[DOUBLE, FLOAT, FLOAT16, INT8, UINT8], + auto_pad: str = "NOTSET", + ceil_mode: int = 0, + dilations: Optional[Sequence[int]] = None, + kernel_shape: Optional[Sequence[int]] = None, + pads: Optional[Sequence[int]] = None, + storage_order: int = 0, + strides: Optional[Sequence[int]] = None, + ) -> Tuple[Union[DOUBLE, FLOAT, FLOAT16, INT8, UINT8], INT64]: + r"""[🌐 MaxPool(12)](https://onnx.ai/onnx/operators/onnx__MaxPool.html#maxpool-12 "Online Documentation") + + + MaxPool consumes an input tensor X and applies max pooling across + the tensor according to kernel sizes, stride sizes, and pad lengths. + max pooling consisting of computing the max on all values of a + subset of the input tensor according to the kernel size and downsampling the + data into the output tensor Y for further processing. The output spatial shape will be following: + ``` + output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1) + ``` + or + ``` + output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1) + ``` + if ceil_mode is enabled + + ``` + * pad_shape[i] is sum of pads along axis i + ``` + + `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following: + ``` + VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i]) + SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i]) + ``` + And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`: + ``` + pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i] + ``` + The output of each pooling window is maximum number of elements exclude pad. + + + Args: + X: (differentiable) Input data tensor from the previous operator; dimensions + for image case are (N x C x H x W), where N is the batch size, C is the + number of channels, and H and W are the height and the width of the + data. For non image case, the dimensions are in the form of (N x C x D1 + x D2 ... Dn), where N is the batch size. Optionally, if dimension + denotation is in effect, the operation expects the input data tensor to + arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, + DATA_FEATURE, DATA_FEATURE ...]. + + auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. + Where default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = + ceil(input_shape[i] / strides[i])` for each axis `i`. The padding is + split between the two sides equally or almost equally (depending on + whether it is even or odd). In case the padding is an odd number, the + extra padding is added at the end for SAME_UPPER and at the beginning + for SAME_LOWER. + + ceil_mode: Whether to use ceil or floor (default) to compute the output + shape. + + dilations: Dilation value along each spatial axis of filter. 
If not present, + the dilation defaults to 1 along each spatial axis. + + kernel_shape: The size of the kernel along each axis. + + pads: Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. `pads` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis `i` and xi_end, the number of pixels + added at the end of axis `i`. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. + + storage_order: The storage order of the tensor. 0 is row major, and 1 is + column major. This attribute is used only to convert an n-tuple index + value into a single integer value for producing the second output. + + strides: Stride along each spatial axis. If not present, the stride defaults + to 1 along each spatial axis. + """ + + schema = get_schema("MaxPool", 12, "") + op: Callable[..., Tuple[Union[DOUBLE, FLOAT, FLOAT16, INT8, UINT8], INT64]] = Op( + self, "MaxPool", schema + ) + return op( + *self._prepare_inputs(schema, X), + auto_pad=auto_pad, + ceil_mode=ceil_mode, + dilations=dilations, + kernel_shape=kernel_shape, + pads=pads, + storage_order=storage_order, + strides=strides, + ) + + def Min( + self, + *data_0: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + ) -> Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ]: + r"""[🌐 Min(12)](https://onnx.ai/onnx/operators/onnx__Min.html#min-12 "Online Documentation") + + + Element-wise min of each of the input tensors (with Numpy-style broadcasting support). + All inputs and outputs must have the same data type. + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + data_0: (variadic) List of tensors for min. + """ + + schema = get_schema("Min", 12, "") + op: Callable[ + ..., + Union[ + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Min", schema) + return op(*self._prepare_inputs(schema, *data_0)) + + def NegativeLogLikelihoodLoss( + self, + input: Union[DOUBLE, FLOAT, FLOAT16], + target: Union[INT32, INT64], + weight: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + ignore_index: Optional[int] = None, + reduction: str = "mean", + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 NegativeLogLikelihoodLoss(12)](https://onnx.ai/onnx/operators/onnx__NegativeLogLikelihoodLoss.html#negativeloglikelihoodloss-12 "Online Documentation") + + + A NegativeLogLikelihoodLoss operator computes (weighted) negative log likelihood loss. + Its "input" tensor has the shape of (N, C, d1, d2, ..., dk) where k >= 0. + The "input" tensor contains log-probabilities for input[n, :, d_1, d_2,..., d_k] being in a class of [0, C). + The operator's "target" input tensor has the shape of (N, d1, d2, ..., dk). It encodes class labels (one of C classes) + or it may contain a special value (indicated by an attribute ignore_index) for N x d1 x d2 x ... x dk samples. + The loss value for input[n, :, d_1, d_2,...d_k] being classified as class c = target[n][d_1][d_2]...[d_k] is computed as: + loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k]. 
+ When an optional "weight" is provided, the sample loss is calculated as: + loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k] * weight[c]. + loss is zero for the case when target-value equals ignore_index. + + loss[n][d_1][d_2]...[d_k] = 0, when target[n][d_1][d_2]...[d_k] = ignore_index + If "reduction" attribute is set to "none", the operator's output will be the above loss with shape (N, d1, d2, ..., dk). + If "reduction" attribute is set to "mean" (the default attribute value), the output loss is (weight) averaged: + mean(loss), if "weight" is not provided, + or if weight is provided, + sum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]]), for all samples. + If "reduction" attribute is set to "sum", the output is a scalar: + sum(loss). + See also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss. + Example 1: + // negative log likelihood loss, "none" reduction + N, C, d1 = 2, 3, 2 + input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], + [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] + target = [[2, 1], [0, 2]] + loss = np.zeros((N, d1)) + for n in range(N): + for d_1 in range(d1): + c = target[n][d_1] + loss[n][d_1] = -input[n][c][d_1] + // print(loss) + // [[-3. -2.] + // [-0. -2.]] + Example 2: + // weighted negative log likelihood loss, sum reduction + N, C, d1 = 2, 3, 2 + input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], + [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] + target = [[2, 1], [0, 2]] + weight = [0.2, 0.3, 0.1] + loss = np.zeros((N, d1)) + for n in range(N): + for d_1 in range(d1): + c = target[n][d_1] + loss[n][d_1] = -input[n][c][d_1] * weight[c] + loss = np.sum(loss) + // print(loss) + // -1.1 + Example 3: + // weighted negative log likelihood loss, mean reduction + N, C, d1 = 2, 3, 2 + input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], + [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] + target = [[2, 1], [0, 2]] + weight = [0.2, 0.3, 0.1] + loss = np.zeros((N, d1)) + weight_total = 0 + for n in range(N): + for d_1 in range(d1): + c = target[n][d_1] + loss[n][d_1] = -input[n][c][d_1] * weight[c] + weight_total = weight_total + weight[c] + loss = np.sum(loss) / weight_total + // print(loss) + // -1.57 + + + Args: + input: Input tensor of shape (N, C) or (N, C, d1, d2, ..., dk). + + target: Target tensor of shape (N) or (N, d1, d2, ..., dk). Target element + value shall be in range of [0, C). If ignore_index is specified, it may + have a value outside [0, C) and the target values should either be in + the range [0, C) or have the value ignore_index. + + weight: (optional) Optional rescaling weight tensor. If given, it has to be + a tensor of size C. Otherwise, it is treated as if having all ones. + + ignore_index: Specifies a target value that is ignored and does not + contribute to the input gradient. It's an optional value. + + reduction: Type of reduction to apply to loss: none, sum, mean (default). + 'none': the output is the loss for each sample. 'sum': the output will + be summed. 'mean': the sum of the output will be divided by the sum of + applied weights. 
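+
+ As a small numpy sketch, the unweighted "mean" reduction applied to the
+ per-sample loss from Example 1 above:
+ ::
+
+ import numpy as np
+
+ loss = np.array([[-3.0, -2.0], [-0.0, -2.0]])  # per-sample loss from Example 1
+ reduced = np.mean(loss)  # (-3 - 2 - 0 - 2) / 4 = -1.75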
+ """ + + schema = get_schema("NegativeLogLikelihoodLoss", 12, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op( + self, "NegativeLogLikelihoodLoss", schema + ) + return op( + *self._prepare_inputs(schema, input, target, weight), + ignore_index=ignore_index, + reduction=reduction, + ) + + def Pow( + self, + X: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64], + Y: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64]: + r"""[🌐 Pow(12)](https://onnx.ai/onnx/operators/onnx__Pow.html#pow-12 "Online Documentation") + + + Pow takes input data (Tensor) and exponent Tensor, and + produces one output data (Tensor) where the function `f(x) = x^exponent`, + is applied to the data tensor elementwise. + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + Args: + X: First operand, base of the exponent. + + Y: Second operand, power of the exponent. + """ + + schema = get_schema("Pow", 12, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64]] = Op( + self, "Pow", schema + ) + return op(*self._prepare_inputs(schema, X, Y)) + + def ReduceMax( + self, + data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, INT8, UINT32, UINT64, UINT8], + axes: Optional[Sequence[int]] = None, + keepdims: int = 1, + ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, INT8, UINT32, UINT64, UINT8]: + r"""[🌐 ReduceMax(12)](https://onnx.ai/onnx/operators/onnx__ReduceMax.html#reducemax-12 "Online Documentation") + + + Computes the max of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then + the resulted tensor have the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: An input tensor. + + axes: A list of integers, along which to reduce. The default is to reduce + over all the dimensions of the input tensor. Accepted range is [-r, r-1] + where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + """ + + schema = get_schema("ReduceMax", 12, "") + op: Callable[ + ..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, INT8, UINT32, UINT64, UINT8] + ] = Op(self, "ReduceMax", schema) + return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims) + + def ReduceMin( + self, + data: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, INT8, UINT32, UINT64, UINT8], + axes: Optional[Sequence[int]] = None, + keepdims: int = 1, + ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, INT8, UINT32, UINT64, UINT8]: + r"""[🌐 ReduceMin(12)](https://onnx.ai/onnx/operators/onnx__ReduceMin.html#reducemin-12 "Online Documentation") + + + Computes the min of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then + the resulted tensor have the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: An input tensor. + + axes: A list of integers, along which to reduce. The default is to reduce + over all the dimensions of the input tensor. Accepted range is [-r, r-1] + where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. 
+ """ + + schema = get_schema("ReduceMin", 12, "") + op: Callable[ + ..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, INT8, UINT32, UINT64, UINT8] + ] = Op(self, "ReduceMin", schema) + return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims) + + def SoftmaxCrossEntropyLoss( + self, + scores: Union[DOUBLE, FLOAT, FLOAT16], + labels: Union[INT32, INT64], + weights: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + ignore_index: Optional[int] = None, + reduction: str = "mean", + ) -> Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]]: + r"""[🌐 SoftmaxCrossEntropyLoss(12)](https://onnx.ai/onnx/operators/onnx__SoftmaxCrossEntropyLoss.html#softmaxcrossentropyloss-12 "Online Documentation") + + Loss function that measures the softmax cross entropy + between 'scores' and 'labels'. + This operator first computes a loss tensor whose shape is identical to the labels input. + If the input is 2-D with shape (N, C), the loss tensor may be a N-element vector L = (l_1, l_2, ..., l_N). + If the input is N-D tensor with shape (N, C, D1, D2, ..., Dk), + the loss tensor L may have (N, D1, D2, ..., Dk) as its shape and L[i,][j_1][j_2]...[j_k] denotes a scalar element in L. + After L is available, this operator can optionally do a reduction operator. + + shape(scores): (N, C) where C is the number of classes, or (N, C, D1, D2,..., Dk), + with K >= 1 in case of K-dimensional loss. + shape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, D1, D2,..., Dk), + with K >= 1 in case of K-dimensional loss. + + The loss for one sample, l_i, can caculated as follows: + l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk], where i is the index of classes. + or + l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if 'weights' is provided. + + loss is zero for the case when label-value equals ignore_index. + l[i][d1][d2]...[dk] = 0, when labels[n][d1][d2]...[dk] = ignore_index + + where: + p = Softmax(scores) + y = Log(p) + c = labels[i][d1][d2]...[dk] + + Finally, L is optionally reduced: + If reduction = 'none', the output is L with shape (N, D1, D2, ..., Dk). + If reduction = 'sum', the output is scalar: Sum(L). + If reduction = 'mean', the output is scalar: ReduceMean(L), or if weight is provided: ReduceSum(L) / ReduceSum(W), + where tensor W is of shape (N, D1, D2, ..., Dk) and W[n][d1][d2]...[dk] = weights[labels[i][d1][d2]...[dk]]. + + + Args: + scores: The predicted outputs with shape [batch_size, class_size], or + [batch_size, class_size, D1, D2 , ..., Dk], where K is the number of + dimensions. + + labels: The ground truth output tensor, with shape [batch_size], or + [batch_size, D1, D2, ..., Dk], where K is the number of dimensions. + Labels element value shall be in range of [0, C). If ignore_index is + specified, it may have a value outside [0, C) and the label values + should either be in the range [0, C) or have the value ignore_index. + + weights: (optional) A manual rescaling weight given to each class. If given, + it has to be a 1D Tensor assigning weight to each of the classes. + Otherwise, it is treated as if having all ones. + + ignore_index: Specifies a target value that is ignored and does not + contribute to the input gradient. It's an optional value. + + reduction: Type of reduction to apply to loss: none, sum, mean(default). + 'none': no reduction will be applied, 'sum': the output will be summed. + 'mean': the sum of the output will be divided by the number of elements + in the output. 
+ """ + + schema = get_schema("SoftmaxCrossEntropyLoss", 12, "") + op: Callable[ + ..., Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]] + ] = Op(self, "SoftmaxCrossEntropyLoss", schema) + return op( + *self._prepare_inputs(schema, scores, labels, weights), + ignore_index=ignore_index, + reduction=reduction, + ) diff --git a/onnxscript/onnx_opset/_impl/opset13.py b/onnxscript/onnx_opset/_impl/opset13.py new file mode 100644 index 0000000000..b96614b969 --- /dev/null +++ b/onnxscript/onnx_opset/_impl/opset13.py @@ -0,0 +1,5022 @@ +# -------------------------------------------------------------------------- +# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ +# ⚙️ Generated by 'python -m opgen' +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +# flake8: noqa +# mypy: disable-error-code=override +# pylint: disable=W0221,W0222,W0237,W0246,R0901 +# -------------------------------------------------------------------------- + +from typing import Callable, Optional, Sequence, Tuple, Union + +from onnx import GraphProto, SparseTensorProto, TensorProto +from onnx.defs import get_schema + +from onnxscript.onnx_opset._impl.opset12 import Opset12 +from onnxscript.onnx_types import ( + BFLOAT16, + BOOL, + COMPLEX64, + COMPLEX128, + DOUBLE, + FLOAT, + FLOAT16, + INT8, + INT16, + INT32, + INT64, + STRING, + UINT8, + UINT16, + UINT32, + UINT64, +) +from onnxscript.values import Op, Opset + + +class Opset13(Opset12): + def __new__(cls): + return Opset.__new__(cls, "", 13) + + def __init__(self): + super().__init__() + + def Abs( + self, + X: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Abs(13)](https://onnx.ai/onnx/operators/onnx__Abs.html#abs-13 "Online Documentation") + + + Absolute takes one input data (Tensor) and produces one output data + (Tensor) where the absolute is, y = abs(x), is applied to + the tensor elementwise. + + + Args: + X: (differentiable) Input tensor + """ + + schema = get_schema("Abs", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Abs", schema) + return op(*self._prepare_inputs(schema, X)) + + def Add( + self, + A: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + B: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 Add(13)](https://onnx.ai/onnx/operators/onnx__Add.html#add-13 "Online Documentation") + + + Performs element-wise binary addition (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: (differentiable) First operand. + + B: (differentiable) Second operand. 
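+
+ A minimal numpy analogue of the multidirectional broadcasting:
+ ::
+
+ import numpy as np
+
+ A = np.ones((2, 3), dtype=np.float32)
+ B = np.array([10.0, 20.0, 30.0], dtype=np.float32)  # shape (3,)
+ C = A + B  # B broadcasts across the rows of A; C.shape == (2, 3)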
+ """ + + schema = get_schema("Add", 13, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "Add", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def ArgMax( + self, + data: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axis: int = 0, + keepdims: int = 1, + select_last_index: int = 0, + ) -> INT64: + r"""[🌐 ArgMax(13)](https://onnx.ai/onnx/operators/onnx__ArgMax.html#argmax-13 "Online Documentation") + + + Computes the indices of the max elements of the input tensor's element along the + provided axis. The resulting tensor has the same rank as the input if keepdims equals 1. + If keepdims equals 0, then the resulting tensor has the reduced dimension pruned. + If select_last_index is True (default False), the index of the last occurrence of the max + is selected if the max appears more than once in the input. Otherwise the index of the + first occurrence is selected. + The type of the output tensor is integer. + + Args: + data: (non-differentiable) An input tensor. + + axis: The axis in which to compute the arg indices. Accepted range is [-r, + r-1] where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + + select_last_index: Whether to select the last index or the first index if + the {name} appears in multiple indices, default is False (first index). + """ + + schema = get_schema("ArgMax", 13, "") + op: Callable[..., INT64] = Op(self, "ArgMax", schema) + return op( + *self._prepare_inputs(schema, data), + axis=axis, + keepdims=keepdims, + select_last_index=select_last_index, + ) + + def ArgMin( + self, + data: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axis: int = 0, + keepdims: int = 1, + select_last_index: int = 0, + ) -> INT64: + r"""[🌐 ArgMin(13)](https://onnx.ai/onnx/operators/onnx__ArgMin.html#argmin-13 "Online Documentation") + + + Computes the indices of the min elements of the input tensor's element along the + provided axis. The resulting tensor has the same rank as the input if keepdims equals 1. + If keepdims equals 0, then the resulting tensor has the reduced dimension pruned. + If select_last_index is True (default False), the index of the last occurrence of the min + is selected if the min appears more than once in the input. Otherwise the index of the + first occurrence is selected. + The type of the output tensor is integer. + + Args: + data: (non-differentiable) An input tensor. + + axis: The axis in which to compute the arg indices. Accepted range is [-r, + r-1] where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + + select_last_index: Whether to select the last index or the first index if + the {name} appears in multiple indices, default is False (first index). 
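+
+ A numpy sketch of the axis/keepdims semantics (numpy's argmin matches the
+ default first-occurrence behavior; it has no select_last_index):
+ ::
+
+ import numpy as np
+
+ data = np.array([[3, 1], [1, 4]])
+ np.argmin(data, axis=0)  # [1, 0] (first occurrence along axis 0)
+ np.argmin(data, axis=1, keepdims=True)  # [[1], [0]], rank preserved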
+ """ + + schema = get_schema("ArgMin", 13, "") + op: Callable[..., INT64] = Op(self, "ArgMin", schema) + return op( + *self._prepare_inputs(schema, data), + axis=axis, + keepdims=keepdims, + select_last_index=select_last_index, + ) + + def Cast( + self, + input: Union[ + BFLOAT16, + BOOL, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + to: Optional[int] = None, + ) -> Union[ + BFLOAT16, + BOOL, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Cast(13)](https://onnx.ai/onnx/operators/onnx__Cast.html#cast-13 "Online Documentation") + + + The operator casts the elements of a given input tensor to a data type + specified by the 'to' argument and returns an output tensor of the same size in + the converted type. The 'to' argument must be one of the data types specified + in the 'DataType' enum field in the TensorProto message. + + Casting from string tensor in plain (e.g., "3.14" and "1000") and scientific numeric representations + (e.g., "1e-5" and "1E8") to float types is supported. For example, converting string "100.5" to an integer may + result 100. There are some string literals reserved for special floating-point values; + "+INF" (and "INF"), "-INF", and "NaN" are positive infinity, negative infinity, and not-a-number, respectively. + Any string which can exactly match "+INF" in a case-insensitive way would be mapped to positive infinite. Similarly, + this case-insensitive rule is applied to "INF" and "NaN". When casting from numeric tensors + to string tensors, plain floating-point representation (such as "314.15926") would be used. + Converting non-numerical-literal string such as "Hello World!" is an undefined behavior. Cases + of converting string representing floating-point arithmetic value, such as "2.718", to INT is an undefined behavior. + + Conversion from a numerical type to any numerical type is always allowed. + User must be aware of precision loss and value change caused by range difference between two types. + For example, a 64-bit float 3.1415926459 may be round to a 32-bit float 3.141592. Similarly, converting + an integer 36 to Boolean may produce 1 because we truncate bits which can't be stored in the targeted type. + + In more detail, the conversion among numerical types should follow these rules: + + * Casting from floating point to: + * floating point: +/- infinity if OOR (out of range). + * fixed point: undefined if OOR. + * bool: +/- 0.0 to False; all else to True. + * Casting from fixed point to: + * floating point: +/- infinity if OOR. (+ infinity in the case of uint) + * fixed point: when OOR, discard higher bits and reinterpret (with respect to two's complement representation for + signed types). For example, 200 (int16) -> -56 (int8). + * bool: zero to False; nonzero to True. + * Casting from bool to: + * floating point: `{1.0, 0.0}`. + * fixed point: `{1, 0}`. + * bool: no change. + + + Args: + input: (differentiable) Input tensor to be cast. + + to: The data type to which the elements of the input tensor are cast. 
+ Strictly must be one of the types from DataType enum in TensorProto + """ + + schema = get_schema("Cast", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Cast", schema) + return op(*self._prepare_inputs(schema, input), to=to) + + def Ceil( + self, X: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16] + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Ceil(13)](https://onnx.ai/onnx/operators/onnx__Ceil.html#ceil-13 "Online Documentation") + + + Ceil takes one input data (Tensor) and produces one output data + (Tensor) where the ceil is, y = ceil(x), is applied to + the tensor elementwise. + + + Args: + X: (non-differentiable) Input tensor + """ + + schema = get_schema("Ceil", 13, "") + op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = Op(self, "Ceil", schema) + return op(*self._prepare_inputs(schema, X)) + + def Clip( + self, + input: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + min: Optional[ + Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ] + ] = None, + max: Optional[ + Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ] + ] = None, + ) -> Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Clip(13)](https://onnx.ai/onnx/operators/onnx__Clip.html#clip-13 "Online Documentation") + + + Clip operator limits the given input within an interval. The interval is + specified by the inputs 'min' and 'max'. They default to + numeric_limits::lowest() and numeric_limits::max(), respectively. + + + Args: + input: (differentiable) Input tensor whose elements to be clipped + + min: (optional, non-differentiable) Minimum value, under which element is + replaced by min. It must be a scalar(tensor of empty shape). + + max: (optional, non-differentiable) Maximum value, above which element is + replaced by max. It must be a scalar(tensor of empty shape). + """ + + schema = get_schema("Clip", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Clip", schema) + return op(*self._prepare_inputs(schema, input, min, max)) + + def Concat( + self, + *inputs: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axis: Optional[int] = None, + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Concat(13)](https://onnx.ai/onnx/operators/onnx__Concat.html#concat-13 "Online Documentation") + + Concatenate a list of tensors into a single tensor. All input tensors must have the same shape, except for the dimension size of the axis to concatenate on. + + Args: + inputs: (variadic, differentiable) List of tensors for concatenation + + axis: Which axis to concat on. A negative value means counting dimensions + from the back. Accepted range is [-r, r-1] where r = rank(inputs).. 
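+
+ A minimal numpy analogue:
+ ::
+
+ import numpy as np
+
+ x = np.zeros((2, 3))
+ y = np.ones((2, 1))
+ np.concatenate([x, y], axis=1).shape  # (2, 4)
+ np.concatenate([x, y], axis=-1).shape  # (2, 4); negative axis counts from the back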
+ """ + + schema = get_schema("Concat", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Concat", schema) + return op(*self._prepare_inputs(schema, *inputs), axis=axis) + + def Constant( + self, + sparse_value: Optional[SparseTensorProto] = None, + value: Optional[TensorProto] = None, + value_float: Optional[float] = None, + value_floats: Optional[Sequence[float]] = None, + value_int: Optional[int] = None, + value_ints: Optional[Sequence[int]] = None, + value_string: Optional[str] = None, + value_strings: Optional[Sequence[str]] = None, + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Constant(13)](https://onnx.ai/onnx/operators/onnx__Constant.html#constant-13 "Online Documentation") + + + This operator produces a constant tensor. Exactly one of the provided attributes, either value, sparse_value, + or value_* must be specified. + + + Args: + sparse_value: The value for the elements of the output tensor in sparse + format. + + value: The value for the elements of the output tensor. + + value_float: The value for the sole element for the scalar, float32, output + tensor. + + value_floats: The values for the elements for the 1D, float32, output + tensor. + + value_int: The value for the sole element for the scalar, int64, output + tensor. + + value_ints: The values for the elements for the 1D, int64, output tensor. + + value_string: The value for the sole element for the scalar, UTF-8 string, + output tensor. + + value_strings: The values for the elements for the 1D, UTF-8 string, output + tensor. + """ + + schema = get_schema("Constant", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Constant", schema) + return op( + sparse_value=sparse_value, + value=value, + value_float=value_float, + value_floats=value_floats, + value_int=value_int, + value_ints=value_ints, + value_string=value_string, + value_strings=value_strings, + ) + + def DepthToSpace( + self, + input: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + blocksize: Optional[int] = None, + mode: str = "DCR", + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 DepthToSpace(13)](https://onnx.ai/onnx/operators/onnx__DepthToSpace.html#depthtospace-13 "Online Documentation") + + DepthToSpace rearranges (permutes) data from depth into blocks of spatial data. + This is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of + the input tensor where values from the depth dimension are moved in spatial blocks to the height + and width dimensions. By default, `mode` = `DCR`. + In the DCR mode, elements along the depth dimension from the input tensor are rearranged in the + following order: depth, column, and then row. 
The output y is computed from the input x as below: + + b, c, h, w = x.shape + + tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w]) + + tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2]) + + y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize]) + + + In the CRD mode, elements along the depth dimension from the input tensor are rearranged in the + following order: column, row, and the depth. The output y is computed from the input x as below: + + b, c, h, w = x.shape + + tmp = np.reshape(x, [b, c // (blocksize ** 2), blocksize, blocksize, h, w]) + + tmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3]) + + y = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize]) + + + + Args: + input: (differentiable) Input tensor of [N,C,H,W], where N is the batch + axis, C is the channel or depth, H is the height and W is the width. + + blocksize: Blocks of [blocksize, blocksize] are moved. + + mode: DCR (default) for depth-column-row order re-arrangement. Use CRD for + column-row-depth order. + """ + + schema = get_schema("DepthToSpace", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "DepthToSpace", schema) + return op(*self._prepare_inputs(schema, input), blocksize=blocksize, mode=mode) + + def DequantizeLinear( + self, + x: Union[INT32, INT8, UINT8], + x_scale: FLOAT, + x_zero_point: Optional[Union[INT32, INT8, UINT8]] = None, + axis: int = 1, + ) -> FLOAT: + r"""[🌐 DequantizeLinear(13)](https://onnx.ai/onnx/operators/onnx__DequantizeLinear.html#dequantizelinear-13 "Online Documentation") + + + The linear dequantization operator. It consumes a quantized tensor, a scale, and a zero point to compute the full precision tensor. + The dequantization formula is y = (x - x_zero_point) * x_scale. 'x_scale' and 'x_zero_point' must have same shape, and can be either a scalar + for per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization. + 'x_zero_point' and 'x' must have same type. 'x' and 'y' must have same shape. In the case of dequantizing int32, + there's no zero point (zero point is supposed to be 0). + + + Args: + x: N-D quantized input tensor to be de-quantized. + + x_scale: Scale for input 'x'. It can be a scalar, which means a + per-tensor/layer dequantization, or a 1-D tensor for per-axis + dequantization. + + x_zero_point: (optional) Zero point for input 'x'. Shape must match x_scale. + It's optional. Zero point is 0 when it's not specified. + + axis: (Optional) The axis of the dequantizing dimension of the input tensor. + Ignored for per-tensor quantization. Negative value means counting + dimensions from the back. Accepted range is [-r, r-1] where r = + rank(input). + """ + + schema = get_schema("DequantizeLinear", 13, "") + op: Callable[..., FLOAT] = Op(self, "DequantizeLinear", schema) + return op(*self._prepare_inputs(schema, x, x_scale, x_zero_point), axis=axis) + + def Div( + self, + A: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + B: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 Div(13)](https://onnx.ai/onnx/operators/onnx__Div.html#div-13 "Online Documentation") + + + Performs element-wise binary division (with Numpy-style broadcasting support). 
+ + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: (differentiable) First operand. + + B: (differentiable) Second operand. + """ + + schema = get_schema("Div", 13, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "Div", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def Dropout( + self, + data: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + ratio: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + training_mode: Optional[BOOL] = None, + seed: Optional[int] = None, + ) -> Tuple[Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], BOOL]: + r"""[🌐 Dropout(13)](https://onnx.ai/onnx/operators/onnx__Dropout.html#dropout-13 "Online Documentation") + + + Dropout takes an input floating-point tensor, an optional input ratio (floating-point scalar) and an optional input training_mode (boolean scalar). It produces two tensor outputs, + output (floating-point tensor) and mask (optional `Tensor`). If `training_mode` is true then the output Y will be a random dropout; + Note that this Dropout scales the masked input data by the following equation, so to convert the trained model into inference mode, + the user can simply not pass `training_mode` input or set it to false. + :: + + output = scale * data * mask, + + + where + :: + + scale = 1. / (1. - ratio). + + + This operator has **optional** inputs/outputs. See `ONNX `_ for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. + + + Args: + data: (differentiable) The input data as Tensor. + + ratio: (optional, non-differentiable) The ratio of random dropout, with + value in [0, 1). If this input was not set, or if it was set to 0, the + output would be a simple copy of the input. If it's non-zero, output + will be a random dropout of the scaled input, which is typically the + case during training. It is an optional value, if not specified it will + default to 0.5. + + training_mode: (optional, non-differentiable) If set to true then it + indicates dropout is being used for training. It is an optional value + hence unless specified explicitly, it is false. If it is false, ratio is + ignored and the operation mimics inference mode where nothing will be + dropped from the input data and if mask is requested as output it will + contain all ones. + + seed: (Optional) Seed to the random generator, if not specified we will auto + generate one. + """ + + schema = get_schema("Dropout", 13, "") + op: Callable[..., Tuple[Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], BOOL]] = Op( + self, "Dropout", schema + ) + return op(*self._prepare_inputs(schema, data, ratio, training_mode), seed=seed) + + def Equal( + self, + A: Union[ + BFLOAT16, + BOOL, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + B: Union[ + BFLOAT16, + BOOL, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> BOOL: + r"""[🌐 Equal(13)](https://onnx.ai/onnx/operators/onnx__Equal.html#equal-13 "Online Documentation") + + + Returns the tensor resulted from performing the `equal` logical operation + elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). 
+ + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: (non-differentiable) First input operand for the logical operator. + + B: (non-differentiable) Second input operand for the logical operator. + """ + + schema = get_schema("Equal", 13, "") + op: Callable[..., BOOL] = Op(self, "Equal", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def Erf( + self, + input: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Erf(13)](https://onnx.ai/onnx/operators/onnx__Erf.html#erf-13 "Online Documentation") + + + Computes the error function of the given input tensor element-wise. + + + Args: + input: (differentiable) Input tensor + """ + + schema = get_schema("Erf", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Erf", schema) + return op(*self._prepare_inputs(schema, input)) + + def Exp( + self, input: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16] + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Exp(13)](https://onnx.ai/onnx/operators/onnx__Exp.html#exp-13 "Online Documentation") + + + Calculates the exponential of the given input tensor, element-wise. + + + Args: + input: (differentiable) Input tensor + """ + + schema = get_schema("Exp", 13, "") + op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = Op(self, "Exp", schema) + return op(*self._prepare_inputs(schema, input)) + + def Expand( + self, + input: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + shape: INT64, + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Expand(13)](https://onnx.ai/onnx/operators/onnx__Expand.html#expand-13 "Online Documentation") + + + Broadcast the input tensor following the given shape and the broadcast rule. + The broadcast rule is similar to numpy.array(input) * numpy.ones(shape): + Dimensions are right alignment; + Two corresponding dimensions must have the same value, or one of them is equal to 1. + Also, this operator is similar to numpy.broadcast_to(input, shape), + but the major difference is numpy.broadcast_to() does not allow shape to be smaller than input.size(). + It is possible that the output.shape is not equal to shape, when some dimensions in shape is equal to 1, + or the shape.ndim < input.shape.ndim. 
+ + + Args: + input: (differentiable) Input tensor + + shape: (non-differentiable) A 1-D tensor indicates the shape you want to + expand to, following the broadcast rule + """ + + schema = get_schema("Expand", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Expand", schema) + return op(*self._prepare_inputs(schema, input, shape)) + + def Flatten( + self, + input: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axis: int = 1, + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Flatten(13)](https://onnx.ai/onnx/operators/onnx__Flatten.html#flatten-13 "Online Documentation") + + + Flattens the input tensor into a 2D matrix. If input tensor has shape + (d_0, d_1, ... d_n) then the output will have shape + (d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn). + + + Args: + input: (differentiable) A tensor of rank >= axis. + + axis: Indicate up to which input dimensions (exclusive) should be flattened + to the outer dimension of the output. The value for axis must be in the + range [-r, r], where r is the rank of the input tensor. Negative value + means counting dimensions from the back. When axis = 0, the shape of the + output tensor is (1, (d_0 X d_1 ... d_n), where the shape of the input + tensor is (d_0, d_1, ... d_n). + """ + + schema = get_schema("Flatten", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Flatten", schema) + return op(*self._prepare_inputs(schema, input), axis=axis) + + def Floor( + self, X: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16] + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Floor(13)](https://onnx.ai/onnx/operators/onnx__Floor.html#floor-13 "Online Documentation") + + + Floor takes one input data (Tensor) and produces one output data + (Tensor) where the floor is, y = floor(x), is applied to + the tensor elementwise. + + + Args: + X: (non-differentiable) Input tensor + """ + + schema = get_schema("Floor", 13, "") + op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = Op(self, "Floor", schema) + return op(*self._prepare_inputs(schema, X)) + + def Gather( + self, + data: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + indices: Union[INT32, INT64], + axis: int = 0, + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Gather(13)](https://onnx.ai/onnx/operators/onnx__Gather.html#gather-13 "Online Documentation") + + + Given `data` tensor of rank r >= 1, and `indices` tensor of rank q, gather + entries of the axis dimension of `data` (by default outer-most one as axis=0) indexed by `indices`, and concatenates + them in an output tensor of rank q + (r - 1). 
+ + axis = 0 : + + Let + k = indices[i_{0}, ..., i_{q-1}] + Then + output[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[k , j_{0}, ..., j_{r-2}] + + :: + + data = [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] + indices = [ + [0, 1], + [1, 2], + ] + output = [ + [ + [1.0, 1.2], + [2.3, 3.4], + ], + [ + [2.3, 3.4], + [4.5, 5.7], + ], + ] + + + axis = 1 : + + Let + k = indices[i_{0}, ..., i_{q-1}] + Then + output[j_{0}, i_{0}, ..., i_{q-1}, j_{1}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j_{r-2}] + + :: + + data = [ + [1.0, 1.2, 1.9], + [2.3, 3.4, 3.9], + [4.5, 5.7, 5.9], + ] + indices = [ + [0, 2], + ] + axis = 1, + output = [ + [[1.0, 1.9]], + [[2.3, 3.9]], + [[4.5, 5.9]], + ] + + + + + Args: + data: (differentiable) Tensor of rank r >= 1. + + indices: (non-differentiable) Tensor of int32/int64 indices, of any rank q. + All index values are expected to be within bounds [-s, s-1] along axis + of size s. It is an error if any of the index values are out of bounds. + + axis: Which axis to gather on. Negative value means counting dimensions from + the back. Accepted range is [-r, r-1] where r = rank(data). + """ + + schema = get_schema("Gather", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Gather", schema) + return op(*self._prepare_inputs(schema, data, indices), axis=axis) + + def GatherElements( + self, + data: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + indices: Union[INT32, INT64], + axis: int = 0, + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 GatherElements(13)](https://onnx.ai/onnx/operators/onnx__GatherElements.html#gatherelements-13 "Online Documentation") + + + + GatherElements takes two inputs `data` and `indices` of the same rank r >= 1 + and an optional attribute `axis` that identifies an axis of `data` + (by default, the outer-most axis, that is axis 0). It is an indexing operation + that produces its output by indexing into the input data tensor at index + positions determined by elements of the `indices` tensor. + Its output shape is the same as the shape of `indices` and consists of one value + (gathered from the `data`) for each element in `indices`. + + For instance, in the 3-D case (r = 3), the output produced is determined + by the following equations: + :: + + out[i][j][k] = input[index[i][j][k]][j][k] if axis = 0, + out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1, + out[i][j][k] = input[i][j][index[i][j][k]] if axis = 2, + + + + This operator is also the inverse of ScatterElements. It is similar to Torch's gather operation. + + Example 1: + :: + + data = [ + [1, 2], + [3, 4], + ] + indices = [ + [0, 0], + [1, 0], + ] + axis = 1 + output = [ + [1, 1], + [4, 3], + ] + + + Example 2: + :: + + data = [ + [1, 2, 3], + [4, 5, 6], + [7, 8, 9], + ] + indices = [ + [1, 2, 0], + [2, 0, 0], + ] + axis = 0 + output = [ + [4, 8, 3], + [7, 2, 3], + ] + + + + + Args: + data: (differentiable) Tensor of rank r >= 1. + + indices: (non-differentiable) Tensor of int32/int64 indices, with the same + rank r as the input. All index values are expected to be within bounds + [-s, s-1] along axis of size s. 
It is an error if any of the index + values are out of bounds. + + axis: Which axis to gather on. Negative value means counting dimensions from + the back. Accepted range is [-r, r-1] where r = rank(data). + """ + + schema = get_schema("GatherElements", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "GatherElements", schema) + return op(*self._prepare_inputs(schema, data, indices), axis=axis) + + def GatherND( + self, + data: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + indices: INT64, + batch_dims: int = 0, + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 GatherND(13)](https://onnx.ai/onnx/operators/onnx__GatherND.html#gathernd-13 "Online Documentation") + + + Given `data` tensor of rank `r` >= 1, `indices` tensor of rank `q` >= 1, and `batch_dims` integer `b`, this operator gathers + slices of `data` into an output tensor of rank `q + r - indices_shape[-1] - 1 - b`. + + `indices` is an q-dimensional integer tensor, best thought of as a `(q-1)`-dimensional tensor of index-tuples into `data`, + where each element defines a slice of `data` + + `batch_dims` (denoted as `b`) is an integer indicating the number of batch dimensions, i.e the leading `b` number of dimensions of + `data` tensor and `indices` are representing the batches, and the gather starts from the `b+1` dimension. + + Some salient points about the inputs' rank and shape: + + 1) r >= 1 and q >= 1 are to be honored. There is no dependency condition to be met between ranks `r` and `q` + + 2) The first `b` dimensions of the shape of `indices` tensor and `data` tensor must be equal. + + 3) b < min(q, r) is to be honored. + + 4) The `indices_shape[-1]` should have a value between 1 (inclusive) and rank `r-b` (inclusive) + + 5) All values in `indices` are expected to be within bounds [-s, s-1] along axis of size `s` (i.e.) `-data_shape[i] <= indices[...,i] <= data_shape[i] - 1`. + It is an error if any of the index values are out of bounds. + + The output is computed as follows: + + The output tensor is obtained by mapping each index-tuple in the `indices` tensor to the corresponding slice of the input `data`. + + 1) If `indices_shape[-1] > r-b` => error condition + + 2) If `indices_shape[-1] == r-b`, since the rank of `indices` is `q`, `indices` can be thought of as `N` `(q-b-1)`-dimensional tensors + containing 1-D tensors of dimension `r-b`, where `N` is an integer equals to the product of 1 and all the elements in the batch dimensions + of the indices_shape. Let us think of each such `r-b` ranked tensor as `indices_slice`. Each *scalar value* corresponding to `data[0:b-1,indices_slice]` + is filled into the corresponding location of the `(q-b-1)`-dimensional tensor to form the `output` tensor (Example 1 below) + + 3) If `indices_shape[-1] < r-b`, since the rank of `indices` is `q`, `indices` can be thought of as `N` `(q-b-1)`-dimensional tensor + containing 1-D tensors of dimension `< r-b`. Let us think of each such tensors as `indices_slice`. 
Each *tensor slice* corresponding + to `data[0:b-1, indices_slice , :]` is filled into the corresponding location of the `(q-b-1)`-dimensional tensor + to form the `output` tensor (Examples 2, 3, 4 and 5 below) + + This operator is the inverse of `ScatterND`. + + `Example 1` + + batch_dims = 0 + + data = [[0,1],[2,3]] # data_shape = [2, 2] + + indices = [[0,0],[1,1]] # indices_shape = [2, 2] + + output = [0,3] # output_shape = [2] + + `Example 2` + + batch_dims = 0 + + data = [[0,1],[2,3]] # data_shape = [2, 2] + + indices = [[1],[0]] # indices_shape = [2, 1] + + output = [[2,3],[0,1]] # output_shape = [2, 2] + + `Example 3` + + batch_dims = 0 + + data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2] + + indices = [[0,1],[1,0]] # indices_shape = [2, 2] + + output = [[2,3],[4,5]] # output_shape = [2, 2] + + `Example 4` + + batch_dims = 0 + + data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2] + + indices = [[[0,1]],[[1,0]]] # indices_shape = [2, 1, 2] + + output = [[[2,3]],[[4,5]]] # output_shape = [2, 1, 2] + + `Example 5` + + batch_dims = 1 + + data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2] + + indices = [[1],[0]] # indices_shape = [2, 1] + + output = [[2,3],[4,5]] # output_shape = [2, 2] + + + + + Args: + data: (differentiable) Tensor of rank r >= 1. + + indices: (non-differentiable) Tensor of rank q >= 1. All index values are + expected to be within bounds [-s, s-1] along axis of size s. It is an + error if any of the index values are out of bounds. + + batch_dims: The number of batch dimensions. The gather of indexing starts + from dimension of data[batch_dims:] + """ + + schema = get_schema("GatherND", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "GatherND", schema) + return op(*self._prepare_inputs(schema, data, indices), batch_dims=batch_dims) + + def Gemm( + self, + A: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + B: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + C: Optional[ + Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = None, + alpha: float = 1.0, + beta: float = 1.0, + transA: int = 0, + transB: int = 0, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 Gemm(13)](https://onnx.ai/onnx/operators/onnx__Gemm.html#gemm-13 "Online Documentation") + + General Matrix multiplication: + https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3 + + A' = transpose(A) if transA else A + + B' = transpose(B) if transB else B + + Compute Y = alpha * A' * B' + beta * C, where input tensor A has shape (M, K) or (K, M), + input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), + and output tensor Y has shape (M, N). A will be transposed before doing the + computation if attribute transA is non-zero, same for B and transB. + This operator supports **unidirectional broadcasting** (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check `Broadcasting in ONNX `_. + This operator has **optional** inputs/outputs. See `ONNX `_ for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. 
Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. + + + Args: + A: (differentiable) Input tensor A. The shape of A should be (M, K) if + transA is 0, or (K, M) if transA is non-zero. + + B: (differentiable) Input tensor B. The shape of B should be (K, N) if + transB is 0, or (N, K) if transB is non-zero. + + C: (optional, differentiable) Optional input tensor C. If not specified, the + computation is done as if C is a scalar 0. The shape of C should be + unidirectional broadcastable to (M, N). + + alpha: Scalar multiplier for the product of input tensors A * B. + + beta: Scalar multiplier for input tensor C. + + transA: Whether A should be transposed + + transB: Whether B should be transposed + """ + + schema = get_schema("Gemm", 13, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "Gemm", schema) + return op( + *self._prepare_inputs(schema, A, B, C), + alpha=alpha, + beta=beta, + transA=transA, + transB=transB, + ) + + def Greater( + self, + A: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + B: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> BOOL: + r"""[🌐 Greater(13)](https://onnx.ai/onnx/operators/onnx__Greater.html#greater-13 "Online Documentation") + + + Returns the tensor resulted from performing the `greater` logical operation + elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: (non-differentiable) First input operand for the logical operator. + + B: (non-differentiable) Second input operand for the logical operator. + """ + + schema = get_schema("Greater", 13, "") + op: Callable[..., BOOL] = Op(self, "Greater", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def Hardmax( + self, input: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], axis: int = -1 + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Hardmax(13)](https://onnx.ai/onnx/operators/onnx__Hardmax.html#hardmax-13 "Online Documentation") + + + The operator computes the hardmax values for the given input: + + Hardmax(element in input, axis) = 1 if the element is the first maximum value along the specified axis, 0 otherwise + + The "axis" attribute indicates the dimension along which Hardmax + will be performed. The output tensor has the same shape + and contains the Hardmax values of the corresponding input. + + + Args: + input: (differentiable) The input tensor of rank >= axis. + + axis: + Describes the dimension Hardmax will be performed on. + Negative value + means counting dimensions + from the back. Accepted range is [-r, r-1] + where r = rank(input). 
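+
+        As an illustrative sketch (not part of the generated schema text), the
+        equivalent NumPy computation for a 2-D input along the default axis is:
+        ::
+
+            import numpy as np
+
+            x = np.array([[3.0, 1.0, 3.0]])
+            out = np.zeros_like(x)
+            # one-hot of the FIRST maximum along axis=-1 (ties go to the first)
+            out[np.arange(x.shape[0]), np.argmax(x, axis=-1)] = 1.0
+            # out == [[1., 0., 0.]]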
+ """ + + schema = get_schema("Hardmax", 13, "") + op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = Op( + self, "Hardmax", schema + ) + return op(*self._prepare_inputs(schema, input), axis=axis) + + def Identity( + self, + input: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Identity(13)](https://onnx.ai/onnx/operators/onnx__Identity.html#identity-13 "Online Documentation") + + Identity operator + + Args: + input: (differentiable) Input tensor + """ + + schema = get_schema("Identity", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Identity", schema) + return op(*self._prepare_inputs(schema, input)) + + def If( + self, + cond: BOOL, + else_branch: Optional[GraphProto] = None, + then_branch: Optional[GraphProto] = None, + ) -> Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 If(13)](https://onnx.ai/onnx/operators/onnx__If.html#if-13 "Online Documentation") + + If conditional + + Args: + cond: Condition for the if + + else_branch: Graph to run if condition is false. Has N outputs: values you + wish to be live-out to the enclosing scope. The number of outputs must + match the number of outputs in the then_branch. + + then_branch: Graph to run if condition is true. Has N outputs: values you + wish to be live-out to the enclosing scope. The number of outputs must + match the number of outputs in the else_branch. + """ + + schema = get_schema("If", 13, "") + op: Callable[ + ..., + Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "If", schema) + return op( + *self._prepare_inputs(schema, cond), + else_branch=else_branch, + then_branch=then_branch, + ) + + def IsNaN(self, X: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]) -> BOOL: + r"""[🌐 IsNaN(13)](https://onnx.ai/onnx/operators/onnx__IsNaN.html#isnan-13 "Online Documentation") + + Returns which elements of the input are NaN. 
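+
+        For example (an illustrative sketch; the behavior mirrors numpy.isnan):
+        ::
+
+            import numpy as np
+
+            x = np.array([1.0, np.nan, np.inf], dtype=np.float32)
+            np.isnan(x)  # -> [False, True, False]; infinities are not NaN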
+ + Args: + X: (non-differentiable) input + """ + + schema = get_schema("IsNaN", 13, "") + op: Callable[..., BOOL] = Op(self, "IsNaN", schema) + return op(*self._prepare_inputs(schema, X)) + + def LRN( + self, + X: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + alpha: float = 9.999999747378752e-05, + beta: float = 0.75, + bias: float = 1.0, + size: Optional[int] = None, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 LRN(13)](https://onnx.ai/onnx/operators/onnx__LRN.html#lrn-13 "Online Documentation") + + + Local Response Normalization proposed in the [AlexNet paper](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf). + It normalizes over local input regions. + The local region is defined across the channels. For an element X[n, c, d1, ..., dk] in a tensor + of shape (N x C x D1 x D2, ..., Dk), its region is + {X[n, i, d1, ..., dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))}. + + square_sum[n, c, d1, ..., dk] = sum(X[n, i, d1, ..., dk] ^ 2), + where max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2)). + + Y[n, c, d1, ..., dk] = X[n, c, d1, ..., dk] / (bias + alpha / size * square_sum[n, c, d1, ..., dk] ) ^ beta + + + Args: + X: (differentiable) Input data tensor from the previous operator; dimensions + for image case are (N x C x H x W), where N is the batch size, C is the + number of channels, and H and W are the height and the width of the + data. For non image case, the dimensions are in the form of (N x C x D1 + x D2 ... Dn), where N is the batch size. Optionally, if dimension + denotation is in effect, the operation expects the input data tensor to + arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, + DATA_FEATURE, DATA_FEATURE ...]. + + alpha: Scaling parameter. + + beta: The exponent. + + size: The number of channels to sum over + """ + + schema = get_schema("LRN", 13, "") + op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = Op(self, "LRN", schema) + return op( + *self._prepare_inputs(schema, X), alpha=alpha, beta=beta, bias=bias, size=size + ) + + def Less( + self, + A: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + B: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> BOOL: + r"""[🌐 Less(13)](https://onnx.ai/onnx/operators/onnx__Less.html#less-13 "Online Documentation") + + + Returns the tensor resulted from performing the `less` logical operation + elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: (non-differentiable) First input operand for the logical operator. + + B: (non-differentiable) Second input operand for the logical operator. + """ + + schema = get_schema("Less", 13, "") + op: Callable[..., BOOL] = Op(self, "Less", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def Log( + self, input: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16] + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Log(13)](https://onnx.ai/onnx/operators/onnx__Log.html#log-13 "Online Documentation") + + + Calculates the natural log of the given input tensor, element-wise. 
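+
+        For example (an illustrative sketch; this is the natural log, base e):
+        ::
+
+            import numpy as np
+
+            np.log(np.array([1.0, np.e, np.e**2]))  # -> [0., 1., 2.]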
+ + + Args: + input: (differentiable) Input tensor + """ + + schema = get_schema("Log", 13, "") + op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = Op(self, "Log", schema) + return op(*self._prepare_inputs(schema, input)) + + def LogSoftmax( + self, input: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], axis: int = -1 + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 LogSoftmax(13)](https://onnx.ai/onnx/operators/onnx__LogSoftmax.html#logsoftmax-13 "Online Documentation") + + + The operator computes the log of softmax values for the given input: + + LogSoftmax(input, axis) = Log(Softmax(input, axis=axis)) + + The "axis" attribute indicates the dimension along which LogSoftmax + will be performed. The output tensor has the same shape + and contains the LogSoftmax values of the corresponding input. + + + Args: + input: (differentiable) The input tensor of rank >= axis. + + axis: + Describes the dimension LogSoftmax will be performed on. + Negative + value means counting dimensions + from the back. Accepted range is [-r, + r-1] where r = rank(input). + """ + + schema = get_schema("LogSoftmax", 13, "") + op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = Op( + self, "LogSoftmax", schema + ) + return op(*self._prepare_inputs(schema, input), axis=axis) + + def Loop( + self, + M: Optional[INT64], + cond: Optional[BOOL], + *v_initial: Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + body: Optional[GraphProto] = None, + ) -> Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Loop(13)](https://onnx.ai/onnx/operators/onnx__Loop.html#loop-13 "Online Documentation") + + + Generic Looping construct. This loop has multiple termination conditions: + + 1) Trip count. Iteration count specified at runtime. Set by + specifying the input M. Optional. Set to empty string to omit. + Note that a static trip count (specified at graph construction time) can be + specified by passing in a constant node for input M. + 2) Loop termination condition. This is an input to the op that determines + whether to run the first iteration and also a loop-carried dependency for + the body graph. The body graph must yield a value for the condition variable, + whether this input is provided or not. + + This table summarizes the operating modes of this operator with equivalent + C-style code: + + Operator inputs defined as (max_trip_count, condition_var). + + input ("", ""): + for (int i=0; ; ++i) { + cond = ... 
// Note this value is ignored, but is required in the body
+            }
+
+        input ("", cond) // Note this is analogous to a while loop
+            bool cond = ...;
+            for (int i=0; cond; ++i) {
+              cond = ...;
+            }
+
+        input ("", 1) // Note this is analogous to a do-while loop
+            bool cond = true
+            for (int i=0; cond; ++i) {
+              cond = ...;
+            }
+
+        input (trip_count, "") // Note this is analogous to a for loop
+            int trip_count = ...
+            for (int i=0; i < trip_count; ++i) {
+              cond = ...; // ignored
+            }
+
+        input (trip_count, cond)
+            int trip_count = ...;
+            bool cond = ...;
+            for (int i=0; i < trip_count && cond; ++i) {
+              cond = ...;
+            }
+
+
+        *Sample usage - cond as well as trip count*
+
+            graph predict-net {
+              %a = Constant[value = <Scalar Tensor [3]>]()
+              %b = Constant[value = <Scalar Tensor [6]>]()
+              %keepgoing = Constant[value = <Scalar Tensor [1]>]()
+              %max_trip_count = Constant[value = <Scalar Tensor [10]>]()
+              %keepgoing_out, %b_out, %user_defined_vals = Loop[body = <graph body-net>](%max_trip_count, %keepgoing, %b)
+              return
+            }
+
+            graph body-net (
+              %i[INT32, scalar]           // iteration number
+              %keepgoing_in[BOOL, scalar] // incoming loop-termination-condition; not used
+              %b_in[INT32, scalar]        // incoming value of loop-carried-dependency b
+            ) {
+              %my_local = Add(%a, %b_in)
+              %b_out = Sub(%a, %b_in) // outgoing value of loop-carried-dependency b
+              %keepgoing_out = Greater(%my_local, %b_out) // outgoing loop-termination-condition
+              %user_defined_val = Add(%b_in, %b_in) // scan-output value to be accumulated
+              return %keepgoing_out, %b_out, %user_defined_val
+            }
+
+        *Sample equivalent C code*
+
+            {
+              /* User-defined code (enclosing scope) */
+              int a = 3, b = 6;
+              bool keepgoing = true; // Analogous to input cond
+              /* End user-defined code */
+
+              /* Implicitly-defined code */
+              const int max_trip_count = 10; // Analogous to input M
+              int user_defined_vals[]; // Imagine this is resizable
+              /* End implicitly-defined code */
+              /* initialize loop-carried variables and scan-output variables */
+              bool keepgoing_out = keepgoing
+              int b_out = b
+
+              for (int i=0; i < max_trip_count && keepgoing_out; ++i) {
+                /* Implicitly-defined code: bind actual parameter values
+                   to formal parameter variables of loop-body */
+                bool keepgoing_in = keepgoing_out;
+                int b_in = b_out;
+
+                /* User-defined code (loop body) */
+                int my_local = a + b_in; // Reading value "a" from the enclosing scope is fine
+                b_out = a - b_in;
+                keepgoing_out = my_local > b_out;
+                user_defined_val = b_in + b_in; // b_in and b_out are different variables
+                /* End user-defined code */
+
+                /* Implicitly defined-code */
+                user_defined_vals[i] = user_defined_val // accumulate scan-output values
+              }
+              // int t = my_local; // Can't do this. my_local is not accessible here.
+
+              // The values below are bound to the output variables of the loop and therefore accessible
+              // b_out; user_defined_vals; keepgoing_out;
+            }
+
+        There are several things of note in this code snippet:
+
+        1) Values from the enclosing scope (i.e. variable "a" here) are in scope and can
+           be referenced in the inputs of the loop.
+        2) Any values computed in the loop body that need to be used in a subsequent
+           iteration or after the loop are modelled using a pair of variables in the loop-body,
+           consisting of an input variable (eg., b_in) and an output variable (eg., b_out).
+           These are referred to as loop-carried dependences. The loop operation node
+           supplies the input value of the input variable for the first iteration, and
+           returns the output value of the output variable produced by the final
+           iteration.
+        3) Scan_output variables are used to implicitly concatenate values computed across
+           all the iterations. In the above example, the values of user_defined_val computed
+           over all the iterations are concatenated and returned as the value of user_defined_vals
+           after the loop.
+        4) Values created in the body cannot be accessed in the enclosing scope,
+           except using the mechanism described above.
+
+        Note that the semantics of this op support "diagonal" or "wavefront" execution.
+        (See Step 3 here for an example:
+        https://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/).
+        Frontends should emit multi-layer RNNs as a series of While operators (with
+        time being the inner looping dimension), with each successive layer consuming
+        the scan_outputs from the previous layer, possibly going through several
+        point-wise operators (e.g. dropout, residual connections, linear layer).
+
+        Input/output matching for the subgraph (produced by the Loop node) is based on
+        order instead of name; the implementation figures out the names from this order.
+
+
+        Args:
+            M: (optional) A maximum trip-count for the loop specified at runtime.
+                Optional. Pass empty string to skip.
+
+            cond: (optional) A boolean termination condition. Optional. Pass empty
+                string to skip.
+
+            v_initial: (variadic, heterogeneous) The initial values of any loop-carried
+                dependencies (values that change across loop iterations)
+
+            body: The graph run on each iteration. It has 2+N inputs: (iteration_num,
+                condition, loop carried dependencies...). It has 1+N+K outputs:
+                (condition, loop carried dependencies..., scan_outputs...). Each
+                scan_output is created by concatenating the value of the specified
+                output at the end of each iteration of the loop. It is an error if
+                the dimensions or data type of these scan_outputs change across loop
+                iterations.
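+
+        A minimal Python sketch of the execution protocol described above
+        (illustrative only; `run_loop` and its argument names are hypothetical,
+        not part of this module):
+        ::
+
+            def run_loop(M, cond, v_initial, body):
+                # body(i, cond, *carried) -> (cond, *carried_out, *scan_values)
+                carried, scans, i = list(v_initial), [], 0
+                while (M is None or i < M) and cond:
+                    cond, *rest = body(i, cond, *carried)
+                    carried = rest[: len(v_initial)]
+                    scans.append(rest[len(v_initial) :])
+                    i += 1
+                # each scan output concatenates its per-iteration values
+                return (*carried, *map(list, zip(*scans)))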
+ """ + + schema = get_schema("Loop", 13, "") + op: Callable[ + ..., + Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Loop", schema) + return op(*self._prepare_inputs(schema, M, cond, *v_initial), body=body) + + def MatMul( + self, + A: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + B: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 MatMul(13)](https://onnx.ai/onnx/operators/onnx__MatMul.html#matmul-13 "Online Documentation") + + + Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html + + + Args: + A: (differentiable) N-dimensional matrix A + + B: (differentiable) N-dimensional matrix B + """ + + schema = get_schema("MatMul", 13, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "MatMul", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def Max( + self, + *data_0: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Max(13)](https://onnx.ai/onnx/operators/onnx__Max.html#max-13 "Online Documentation") + + + Element-wise max of each of the input tensors (with Numpy-style broadcasting support). + All inputs and outputs must have the same data type. + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + data_0: (variadic, differentiable) List of tensors for max. + """ + + schema = get_schema("Max", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Max", schema) + return op(*self._prepare_inputs(schema, *data_0)) + + def Mean( + self, *data_0: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16] + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Mean(13)](https://onnx.ai/onnx/operators/onnx__Mean.html#mean-13 "Online Documentation") + + + Element-wise mean of each of the input tensors (with Numpy-style broadcasting support). + All inputs and outputs must have the same data type. + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + data_0: (variadic, differentiable) List of tensors for mean. 
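+
+        For example (an illustrative sketch; broadcasting applies before averaging):
+        ::
+
+            import numpy as np
+
+            a = np.array([[1.0, 2.0]])    # shape (1, 2)
+            b = np.array([[3.0], [5.0]])  # shape (2, 1)
+            (a + b) / 2                   # shape (2, 2), element-wise mean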
+ """ + + schema = get_schema("Mean", 13, "") + op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = Op(self, "Mean", schema) + return op(*self._prepare_inputs(schema, *data_0)) + + def MeanVarianceNormalization( + self, X: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], axes: Sequence[int] = (0, 2, 3) + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 MeanVarianceNormalization(13)](https://onnx.ai/onnx/operators/onnx__MeanVarianceNormalization.html#meanvariancenormalization-13 "Online Documentation") + + + A MeanVarianceNormalization Function: Perform mean variance normalization + on the input tensor X using formula:
``` (X-EX)/sqrt(E(X-EX)^2) ```
+
+
+        Args:
+            X: (differentiable) Input tensor
+
+            axes: A list of integers, along which to reduce. The default is to calculate
+                along axes [0,2,3] for calculating mean and variance along each channel.
+                Two variables with the same C-coordinate are associated with the same
+                mean and variance.
+        """
+
+        schema = get_schema("MeanVarianceNormalization", 13, "")
+        op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = Op(
+            self, "MeanVarianceNormalization", schema
+        )
+        return op(*self._prepare_inputs(schema, X), axes=axes)
+
+    def Min(
+        self,
+        *data_0: Union[
+            BFLOAT16,
+            DOUBLE,
+            FLOAT,
+            FLOAT16,
+            INT16,
+            INT32,
+            INT64,
+            INT8,
+            UINT16,
+            UINT32,
+            UINT64,
+            UINT8,
+        ],
+    ) -> Union[
+        BFLOAT16,
+        DOUBLE,
+        FLOAT,
+        FLOAT16,
+        INT16,
+        INT32,
+        INT64,
+        INT8,
+        UINT16,
+        UINT32,
+        UINT64,
+        UINT8,
+    ]:
+        r"""[🌐 Min(13)](https://onnx.ai/onnx/operators/onnx__Min.html#min-13 "Online Documentation")
+
+
+        Element-wise min of each of the input tensors (with Numpy-style broadcasting support).
+        All inputs and outputs must have the same data type.
+        This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX <https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md>`_.
+
+
+        Args:
+            data_0: (variadic, differentiable) List of tensors for min.
+        """
+
+        schema = get_schema("Min", 13, "")
+        op: Callable[
+            ...,
+            Union[
+                BFLOAT16,
+                DOUBLE,
+                FLOAT,
+                FLOAT16,
+                INT16,
+                INT32,
+                INT64,
+                INT8,
+                UINT16,
+                UINT32,
+                UINT64,
+                UINT8,
+            ],
+        ] = Op(self, "Min", schema)
+        return op(*self._prepare_inputs(schema, *data_0))
+
+    def Mod(
+        self,
+        A: Union[
+            BFLOAT16,
+            DOUBLE,
+            FLOAT,
+            FLOAT16,
+            INT16,
+            INT32,
+            INT64,
+            INT8,
+            UINT16,
+            UINT32,
+            UINT64,
+            UINT8,
+        ],
+        B: Union[
+            BFLOAT16,
+            DOUBLE,
+            FLOAT,
+            FLOAT16,
+            INT16,
+            INT32,
+            INT64,
+            INT8,
+            UINT16,
+            UINT32,
+            UINT64,
+            UINT8,
+        ],
+        fmod: int = 0,
+    ) -> Union[
+        BFLOAT16,
+        DOUBLE,
+        FLOAT,
+        FLOAT16,
+        INT16,
+        INT32,
+        INT64,
+        INT8,
+        UINT16,
+        UINT32,
+        UINT64,
+        UINT8,
+    ]:
+        r"""[🌐 Mod(13)](https://onnx.ai/onnx/operators/onnx__Mod.html#mod-13 "Online Documentation")
+
+
+        Performs element-wise binary modulus (with Numpy-style broadcasting support).
+        The sign of the remainder is the same as that of the Divisor.
+
+        Mod operator can also behave like C fmod() or numpy.fmod. In this case, however, the sign of the remainder will be the same as the Dividend
+        (in contrast to integer mod). To force a behavior like numpy.fmod() an 'fmod' Attribute is provided.
+        This attribute is set to 0 by default causing the behavior to be like integer mod.
+        Setting this attribute to 1 causes the remainder to be calculated similar to that of numpy.fmod().
+
+        If the input type is floating point, then `fmod` attribute must be set to 1.
+
+        In case of the divisor being zero, the results will be platform dependent.
+
+        This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX <https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md>`_.
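+
+        For example (an illustrative sketch of the two modes, mirroring
+        numpy.mod and numpy.fmod):
+        ::
+
+            import numpy as np
+
+            a, b = np.array([-4, 7]), np.array([3, -3])
+            np.mod(a, b)   # -> [ 2, -2]  sign follows the divisor (fmod=0)
+            np.fmod(a, b)  # -> [-1,  1]  sign follows the dividend (fmod=1)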
+ + + Args: + A: (differentiable) Dividend tensor + + B: (non-differentiable) Divisor tensor + + fmod: Whether the operator should behave like fmod (default=0 meaning it + will do integer mods); Set this to 1 to force fmod treatment + """ + + schema = get_schema("Mod", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Mod", schema) + return op(*self._prepare_inputs(schema, A, B), fmod=fmod) + + def Mul( + self, + A: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + B: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 Mul(13)](https://onnx.ai/onnx/operators/onnx__Mul.html#mul-13 "Online Documentation") + + + Performs element-wise binary multiplication (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: (differentiable) First operand. + + B: (differentiable) Second operand. + """ + + schema = get_schema("Mul", 13, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "Mul", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def Neg( + self, X: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8] + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8]: + r"""[🌐 Neg(13)](https://onnx.ai/onnx/operators/onnx__Neg.html#neg-13 "Online Documentation") + + + Neg takes one input data (Tensor) and produces one output data + (Tensor) where each element flipped sign, y = -x, is applied to + the tensor elementwise. + + + Args: + X: (differentiable) Input tensor + """ + + schema = get_schema("Neg", 13, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8] + ] = Op(self, "Neg", schema) + return op(*self._prepare_inputs(schema, X)) + + def NegativeLogLikelihoodLoss( + self, + input: Union[DOUBLE, FLOAT, FLOAT16], + target: Union[INT32, INT64], + weight: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + ignore_index: Optional[int] = None, + reduction: str = "mean", + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 NegativeLogLikelihoodLoss(13)](https://onnx.ai/onnx/operators/onnx__NegativeLogLikelihoodLoss.html#negativeloglikelihoodloss-13 "Online Documentation") + + + A NegativeLogLikelihoodLoss operator computes (weighted) negative log likelihood loss. + Its "input" tensor has the shape of (N, C, d1, d2, ..., dk) where k >= 0. + The "input" tensor contains log-probabilities for input[n, :, d_1, d_2,..., d_k] being in a class of [0, C). + The operator's "target" input tensor has the shape of (N, d1, d2, ..., dk). It encodes class labels (one of C classes) + or it may contain a special value (indicated by an attribute ignore_index) for N x d1 x d2 x ... x dk samples. + The loss value for input[n, :, d_1, d_2,...d_k] being classified as class c = target[n][d_1][d_2]...[d_k] is computed as: + + loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k]. + + When an optional "weight" is provided, the sample loss is calculated as: + + loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k] * weight[c]. + + loss is zero for the case when target-value equals ignore_index. 
+ + loss[n][d_1][d_2]...[d_k] = 0, when target[n][d_1][d_2]...[d_k] = ignore_index + + If "reduction" attribute is set to "none", the operator's output will be the above loss with shape (N, d1, d2, ..., dk). + If "reduction" attribute is set to "mean" (the default attribute value), the output loss is (weight) averaged: + + mean(loss), if "weight" is not provided, + + or if weight is provided, + + sum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]]), for all samples. + + If "reduction" attribute is set to "sum", the output is a scalar: + sum(loss). + + See also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss. + + Example 1: + + // negative log likelihood loss, "none" reduction + N, C, d1 = 2, 3, 2 + input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], + [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] + target = [[2, 1], [0, 2]] + + loss = np.zeros((N, d1)) + for n in range(N): + for d_1 in range(d1): + c = target[n][d_1] + loss[n][d_1] = -input[n][c][d_1] + + // print(loss) + // [[-3. -2.] + // [-0. -2.]] + + Example 2: + + // weighted negative log likelihood loss, sum reduction + N, C, d1 = 2, 3, 2 + input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], + [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] + target = [[2, 1], [0, 2]] + weight = [0.2, 0.3, 0.1] + loss = np.zeros((N, d1)) + for n in range(N): + for d_1 in range(d1): + c = target[n][d_1] + loss[n][d_1] = -input[n][c][d_1] * weight[c] + + loss = np.sum(loss) + // print(loss) + // -1.1 + + Example 3: + + // weighted negative log likelihood loss, mean reduction + N, C, d1 = 2, 3, 2 + input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], + [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] + target = [[2, 1], [0, 2]] + weight = [0.2, 0.3, 0.1] + loss = np.zeros((N, d1)) + weight_total = 0 + for n in range(N): + for d_1 in range(d1): + c = target[n][d_1] + loss[n][d_1] = -input[n][c][d_1] * weight[c] + weight_total = weight_total + weight[c] + + loss = np.sum(loss) / weight_total + // print(loss) + // -1.57 + + + Args: + input: (differentiable) Input tensor of shape (N, C) or (N, C, d1, d2, ..., + dk). + + target: (non-differentiable) Target tensor of shape (N) or (N, d1, d2, ..., + dk). Target element value shall be in range of [0, C). If ignore_index + is specified, it may have a value outside [0, C) and the target values + should either be in the range [0, C) or have the value ignore_index. + + weight: (optional, non-differentiable) Optional rescaling weight tensor. If + given, it has to be a tensor of size C. Otherwise, it is treated as if + having all ones. + + ignore_index: Specifies a target value that is ignored and does not + contribute to the input gradient. It's an optional value. + + reduction: Type of reduction to apply to loss: none, sum, mean (default). + 'none': the output is the loss for each sample. 'sum': the output will + be summed. 'mean': the sum of the output will be divided by the sum of + applied weights. 
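+
+        As a runnable sanity check of Example 1 above (an illustrative NumPy
+        sketch, not part of the generated schema text):
+        ::
+
+            import numpy as np
+
+            inp = np.array([[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],
+                            [[0.0, 1.0], [2.0, 2.0], [1.0, 2.0]]])
+            target = np.array([[2, 1], [0, 2]])
+            # loss[n][d] = -inp[n][target[n][d]][d]
+            loss = -np.take_along_axis(inp, target[:, None, :], axis=1)[:, 0, :]
+            # loss == [[-3., -2.], [-0., -2.]]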
+ """ + + schema = get_schema("NegativeLogLikelihoodLoss", 13, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op( + self, "NegativeLogLikelihoodLoss", schema + ) + return op( + *self._prepare_inputs(schema, input, target, weight), + ignore_index=ignore_index, + reduction=reduction, + ) + + def NonZero( + self, + X: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> INT64: + r"""[🌐 NonZero(13)](https://onnx.ai/onnx/operators/onnx__NonZero.html#nonzero-13 "Online Documentation") + + + Returns the indices of the elements that are non-zero + (in row-major order - by dimension). + NonZero behaves similar to numpy.nonzero: + https://docs.scipy.org/doc/numpy/reference/generated/numpy.nonzero.html, + but for scalar input, NonZero produces output shape (0, N) instead of (1, N), which is different from Numpy's behavior. + + + Args: + X: (non-differentiable) input + """ + + schema = get_schema("NonZero", 13, "") + op: Callable[..., INT64] = Op(self, "NonZero", schema) + return op(*self._prepare_inputs(schema, X)) + + def Pad( + self, + data: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + pads: INT64, + constant_value: Optional[ + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ] + ] = None, + mode: str = "constant", + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Pad(13)](https://onnx.ai/onnx/operators/onnx__Pad.html#pad-13 "Online Documentation") + + + Given a tensor containing the data to be padded (`data`), a tensor containing the number of start and end pad values for axis (`pads`), (optionally) a `mode`, and (optionally) `constant_value`, + a padded tensor (`output`) is generated. + + The three supported `modes` are (similar to corresponding modes supported by `numpy.pad`): + + 1) `constant`(default) - pads with a given constant value as specified by `constant_value` (which defaults to 0, empty string, or False) + + 2) `reflect` - pads with the reflection of the vector mirrored on the first and last values of the vector along each axis + + 3) `edge` - pads with the edge values of array + + + Example 1 (`constant` mode): + Insert 0 pads to the beginning of the second dimension. + + data = + [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] + + pads = [0, 2, 0, 0] + + mode = 'constant' + + constant_value = 0.0 + + output = + [ + [0.0, 0.0, 1.0, 1.2], + [0.0, 0.0, 2.3, 3.4], + [0.0, 0.0, 4.5, 5.7], + ] + + + Example 2 (`reflect` mode): + data = + [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] + + pads = [0, 2, 0, 0] + + mode = 'reflect' + + output = + [ + [1.0, 1.2, 1.0, 1.2], + [2.3, 3.4, 2.3, 3.4], + [4.5, 5.7, 4.5, 5.7], + ] + + + Example 3 (`edge` mode): + data = + [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] + + pads = [0, 2, 0, 0] + + mode = 'edge' + + output = + [ + [1.0, 1.0, 1.0, 1.2], + [2.3, 2.3, 2.3, 3.4], + [4.5, 4.5, 4.5, 5.7], + ] + + + + Args: + data: (differentiable) Input tensor. + + pads: (non-differentiable) Tensor of integers indicating the number of + padding elements to add or remove (if negative) at the beginning and end + of each axis. 
For 2D input tensor, it is the number of pixels. `pads` + should be a 1D tensor of shape [2 * input_rank]. `pads` format should + be: [x1_begin, x2_begin,...,x1_end, x2_end,...], where xi_begin is the + number of pad values added at the beginning of axis `i` and xi_end, the + number of pad values added at the end of axis `i`. + + constant_value: (optional, non-differentiable) (Optional) A scalar value to + be used if the mode chosen is `constant` (by default it is 0, empty + string or False). + + mode: Supported modes: `constant`(default), `reflect`, `edge` + """ + + schema = get_schema("Pad", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Pad", schema) + return op(*self._prepare_inputs(schema, data, pads, constant_value), mode=mode) + + def Pow( + self, + X: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64], + Y: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64]: + r"""[🌐 Pow(13)](https://onnx.ai/onnx/operators/onnx__Pow.html#pow-13 "Online Documentation") + + + Pow takes input data (Tensor) and exponent Tensor, and + produces one output data (Tensor) where the function `f(x) = x^exponent`, + is applied to the data tensor elementwise. + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + Args: + X: (differentiable) First operand, base of the exponent. + + Y: (differentiable) Second operand, power of the exponent. + """ + + schema = get_schema("Pow", 13, "") + op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64]] = Op( + self, "Pow", schema + ) + return op(*self._prepare_inputs(schema, X, Y)) + + def QuantizeLinear( + self, + x: Union[FLOAT, INT32], + y_scale: FLOAT, + y_zero_point: Optional[Union[INT8, UINT8]] = None, + axis: int = 1, + ) -> Union[INT8, UINT8]: + r"""[🌐 QuantizeLinear(13)](https://onnx.ai/onnx/operators/onnx__QuantizeLinear.html#quantizelinear-13 "Online Documentation") + + + The linear quantization operator. It consumes a high precision tensor, a scale, and a zero point to compute the low precision / quantized tensor. + The scale factor and zero point must have same shape, and can be either a scalar for per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization. + The quantization formula is y = saturate ((x / y_scale) + y_zero_point). + For saturation, it saturates to [0, 255] if it's uint8, or [-128, 127] if it's int8. + For (x / y_scale), it's rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. 'y_zero_point' and 'y' must have same type. + + + Args: + x: N-D full precision Input tensor to be quantized. + + y_scale: Scale for doing quantization to get 'y'. It can be a scalar, which + means per-tensor/layer quantization, or a 1-D Tensor for per-axis + quantization. + + y_zero_point: (optional) Zero point for doing quantization to get 'y'. Shape + must match y_scale. Default is uint8 with zero point of 0 if it's not + specified. + + axis: (Optional) The axis of the quantization dimension of the input tensor. + Ignored for per-tensor quantization. Negative value means counting + dimensions from the back. Accepted range is [-r, r-1] where r = + rank(input). 
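+
+        As an illustrative sketch of the per-tensor uint8 case (np.rint rounds
+        ties to even, matching the rounding rule above):
+        ::
+
+            import numpy as np
+
+            x = np.array([-1.0, 0.0, 1.5, 300.0], dtype=np.float32)
+            y_scale, y_zero_point = np.float32(2.0), 128
+            y = np.clip(np.rint(x / y_scale) + y_zero_point, 0, 255).astype(np.uint8)
+            # y == [128, 128, 129, 255]; 300/2 + 128 = 278 saturates to 255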
+ """ + + schema = get_schema("QuantizeLinear", 13, "") + op: Callable[..., Union[INT8, UINT8]] = Op(self, "QuantizeLinear", schema) + return op(*self._prepare_inputs(schema, x, y_scale, y_zero_point), axis=axis) + + def Reciprocal( + self, X: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16] + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Reciprocal(13)](https://onnx.ai/onnx/operators/onnx__Reciprocal.html#reciprocal-13 "Online Documentation") + + + Reciprocal takes one input data (Tensor) and produces one output data + (Tensor) where the reciprocal is, y = 1/x, is applied to + the tensor elementwise. + + + Args: + X: (differentiable) Input tensor + """ + + schema = get_schema("Reciprocal", 13, "") + op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = Op( + self, "Reciprocal", schema + ) + return op(*self._prepare_inputs(schema, X)) + + def ReduceL1( + self, + data: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axes: Optional[Sequence[int]] = None, + keepdims: int = 1, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 ReduceL1(13)](https://onnx.ai/onnx/operators/onnx__ReduceL1.html#reducel1-13 "Online Documentation") + + + Computes the L1 norm of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then + the resulting tensor has the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: (differentiable) An input tensor. + + axes: A list of integers, along which to reduce. The default is to reduce + over all the dimensions of the input tensor. Accepted range is [-r, r-1] + where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + """ + + schema = get_schema("ReduceL1", 13, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "ReduceL1", schema) + return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims) + + def ReduceL2( + self, + data: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axes: Optional[Sequence[int]] = None, + keepdims: int = 1, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 ReduceL2(13)](https://onnx.ai/onnx/operators/onnx__ReduceL2.html#reducel2-13 "Online Documentation") + + + Computes the L2 norm of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then + the resulting tensor has the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: (differentiable) An input tensor. + + axes: A list of integers, along which to reduce. The default is to reduce + over all the dimensions of the input tensor. Accepted range is [-r, r-1] + where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. 
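+
+        For example (an illustrative sketch of the keepdims semantics):
+        ::
+
+            import numpy as np
+
+            x = np.array([[3.0, 4.0], [6.0, 8.0]])
+            np.sqrt((x**2).sum(axis=1, keepdims=True))  # [[5.], [10.]], rank kept
+            np.sqrt((x**2).sum(axis=1))                 # [5., 10.], rank reduced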
+ """ + + schema = get_schema("ReduceL2", 13, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "ReduceL2", schema) + return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims) + + def ReduceLogSum( + self, + data: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axes: Optional[Sequence[int]] = None, + keepdims: int = 1, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 ReduceLogSum(13)](https://onnx.ai/onnx/operators/onnx__ReduceLogSum.html#reducelogsum-13 "Online Documentation") + + + Computes the log sum of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then + the resulting tensor has the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: (differentiable) An input tensor. + + axes: A list of integers, along which to reduce. The default is to reduce + over all the dimensions of the input tensor. Accepted range is [-r, r-1] + where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + """ + + schema = get_schema("ReduceLogSum", 13, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "ReduceLogSum", schema) + return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims) + + def ReduceLogSumExp( + self, + data: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axes: Optional[Sequence[int]] = None, + keepdims: int = 1, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 ReduceLogSumExp(13)](https://onnx.ai/onnx/operators/onnx__ReduceLogSumExp.html#reducelogsumexp-13 "Online Documentation") + + + Computes the log sum exponent of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then + the resulting tensor has the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: (differentiable) An input tensor. + + axes: A list of integers, along which to reduce. The default is to reduce + over all the dimensions of the input tensor. Accepted range is [-r, r-1] + where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + """ + + schema = get_schema("ReduceLogSumExp", 13, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "ReduceLogSumExp", schema) + return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims) + + def ReduceMax( + self, + data: Union[ + BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, INT8, UINT32, UINT64, UINT8 + ], + axes: Optional[Sequence[int]] = None, + keepdims: int = 1, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, INT8, UINT32, UINT64, UINT8]: + r"""[🌐 ReduceMax(13)](https://onnx.ai/onnx/operators/onnx__ReduceMax.html#reducemax-13 "Online Documentation") + + + Computes the max of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then + the resulting tensor has the reduced dimension pruned. 
+ + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: (differentiable) An input tensor. + + axes: A list of integers, along which to reduce. The default is to reduce + over all the dimensions of the input tensor. Accepted range is [-r, r-1] + where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + """ + + schema = get_schema("ReduceMax", 13, "") + op: Callable[ + ..., + Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, INT8, UINT32, UINT64, UINT8], + ] = Op(self, "ReduceMax", schema) + return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims) + + def ReduceMean( + self, + data: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axes: Optional[Sequence[int]] = None, + keepdims: int = 1, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 ReduceMean(13)](https://onnx.ai/onnx/operators/onnx__ReduceMean.html#reducemean-13 "Online Documentation") + + + Computes the mean of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then + the resulting tensor has the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: (differentiable) An input tensor. + + axes: A list of integers, along which to reduce. The default is to reduce + over all the dimensions of the input tensor. Accepted range is [-r, r-1] + where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + """ + + schema = get_schema("ReduceMean", 13, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "ReduceMean", schema) + return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims) + + def ReduceMin( + self, + data: Union[ + BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, INT8, UINT32, UINT64, UINT8 + ], + axes: Optional[Sequence[int]] = None, + keepdims: int = 1, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, INT8, UINT32, UINT64, UINT8]: + r"""[🌐 ReduceMin(13)](https://onnx.ai/onnx/operators/onnx__ReduceMin.html#reducemin-13 "Online Documentation") + + + Computes the min of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then + the resulting tensor has the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: (differentiable) An input tensor. + + axes: A list of integers, along which to reduce. The default is to reduce + over all the dimensions of the input tensor. Accepted range is [-r, r-1] + where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. 
+        """
+
+        schema = get_schema("ReduceMin", 13, "")
+        op: Callable[
+            ...,
+            Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, INT8, UINT32, UINT64, UINT8],
+        ] = Op(self, "ReduceMin", schema)
+        return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims)
+
+    def ReduceProd(
+        self,
+        data: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+        axes: Optional[Sequence[int]] = None,
+        keepdims: int = 1,
+    ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]:
+        r"""[🌐 ReduceProd(13)](https://onnx.ai/onnx/operators/onnx__ReduceProd.html#reduceprod-13 "Online Documentation")
+
+
+        Computes the product of the input tensor's element along the provided axes. The resulting
+        tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then
+        the resulting tensor has the reduced dimension pruned.
+
+        The above behavior is similar to numpy, with the exception that numpy defaults keepdims to
+        False instead of True.
+
+        Args:
+            data: (differentiable) An input tensor.
+
+            axes: A list of integers, along which to reduce. The default is to reduce
+                over all the dimensions of the input tensor. Accepted range is [-r, r-1]
+                where r = rank(data).
+
+            keepdims: Keep the reduced dimension or not, default 1 means keep reduced
+                dimension.
+        """
+
+        schema = get_schema("ReduceProd", 13, "")
+        op: Callable[
+            ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]
+        ] = Op(self, "ReduceProd", schema)
+        return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims)
+
+    def ReduceSum(
+        self,
+        data: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+        axes: Optional[INT64] = None,
+        keepdims: int = 1,
+        noop_with_empty_axes: int = 0,
+    ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]:
+        r"""[🌐 ReduceSum(13)](https://onnx.ai/onnx/operators/onnx__ReduceSum.html#reducesum-13 "Online Documentation")
+
+
+        Computes the sum of the input tensor's element along the provided axes. The resulting
+        tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then
+        the resulting tensor has the reduced dimension pruned.
+
+        The above behavior is similar to numpy, with the exception that numpy defaults keepdims to
+        False instead of True.
+
+        Args:
+            data: (differentiable) An input tensor.
+
+            axes: (optional, non-differentiable) Optional input list of integers, along
+                which to reduce. The default is to reduce over all the dimensions of the
+                input tensor if 'noop_with_empty_axes' is false, else act as an Identity
+                op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1]
+                where r = rank(data).
+
+            keepdims: Keep the reduced dimension or not, default 1 means keep reduced
+                dimension.
+
+            noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior
+                with 'false' is to reduce all axes. When axes is empty and this
+                attribute is set to true, input tensor will not be reduced, and the
+                output tensor would be equivalent to input tensor.
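+
+        Unlike the other reduce ops above, `axes` here is an input tensor rather
+        than an attribute. A minimal usage sketch (hypothetical variable names;
+        assumes `op` is an Opset13 instance and `data` an existing tensor value):
+        ::
+
+            axes = op.Constant(value_ints=[1])              # reduce along axis 1
+            reduced = op.ReduceSum(data, axes, keepdims=1)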
+ """ + + schema = get_schema("ReduceSum", 13, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "ReduceSum", schema) + return op( + *self._prepare_inputs(schema, data, axes), + keepdims=keepdims, + noop_with_empty_axes=noop_with_empty_axes, + ) + + def ReduceSumSquare( + self, + data: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axes: Optional[Sequence[int]] = None, + keepdims: int = 1, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 ReduceSumSquare(13)](https://onnx.ai/onnx/operators/onnx__ReduceSumSquare.html#reducesumsquare-13 "Online Documentation") + + + Computes the sum square of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then + the resulting tensor has the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: (differentiable) An input tensor. + + axes: A list of integers, along which to reduce. The default is to reduce + over all the dimensions of the input tensor. Accepted range is [-r, r-1] + where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + """ + + schema = get_schema("ReduceSumSquare", 13, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "ReduceSumSquare", schema) + return op(*self._prepare_inputs(schema, data), axes=axes, keepdims=keepdims) + + def Relu( + self, X: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16] + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Relu(13)](https://onnx.ai/onnx/operators/onnx__Relu.html#relu-13 "Online Documentation") + + + Relu takes one input data (Tensor) and produces one output data + (Tensor) where the rectified linear function, y = max(0, x), is applied to + the tensor elementwise. + + + Args: + X: (differentiable) Input tensor + """ + + schema = get_schema("Relu", 13, "") + op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = Op(self, "Relu", schema) + return op(*self._prepare_inputs(schema, X)) + + def Reshape( + self, + data: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + shape: INT64, + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Reshape(13)](https://onnx.ai/onnx/operators/onnx__Reshape.html#reshape-13 "Online Documentation") + + + Reshape the input tensor similar to numpy.reshape. + First input is the data tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor. + At most one dimension of the new shape can be -1. In this case, the value is + inferred from the size of the tensor and the remaining dimensions. A dimension + could also be 0, in which case the actual dimension value is unchanged (i.e. taken + from the input tensor). Shape (second input) could be an empty shape, which means converting to a scalar. + The input tensor's shape and the output tensor's shape are required to have the same number of elements. + + Args: + data: (differentiable) An input tensor. + + shape: (non-differentiable) Specified shape for output. 
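+
+        For example (an illustrative sketch; note that the 0-entry "copy this
+        dimension" rule is ONNX-specific and has no numpy equivalent):
+        ::
+
+            import numpy as np
+
+            x = np.zeros((2, 3, 4))
+            x.reshape(2, -1).shape  # (2, 12): the -1 entry is inferred
+            # ONNX: Reshape(x, shape=[0, -1]) also yields shape (2, 12)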
+ """ + + schema = get_schema("Reshape", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Reshape", schema) + return op(*self._prepare_inputs(schema, data, shape)) + + def Resize( + self, + X: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + roi: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + scales: Optional[FLOAT] = None, + sizes: Optional[INT64] = None, + coordinate_transformation_mode: str = "half_pixel", + cubic_coeff_a: float = -0.75, + exclude_outside: int = 0, + extrapolation_value: float = 0.0, + mode: str = "nearest", + nearest_mode: str = "round_prefer_floor", + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Resize(13)](https://onnx.ai/onnx/operators/onnx__Resize.html#resize-13 "Online Documentation") + + + Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood (a.k.a. sampling locations) in the input tensor. + Each dimension value of the output tensor is: + output_dimension = floor(input_dimension * (roi_end - roi_start) * scale) if input \"sizes\" is not specified. + + + Args: + X: (differentiable) N-D tensor + + roi: (optional, non-differentiable) 1-D tensor given as [start1, ..., + startN, end1, ..., endN], where N is the rank of X. The RoIs' + coordinates are normalized in the coordinate system of the input image. + It only takes effect when coordinate_transformation_mode is + "tf_crop_and_resize" + + scales: (optional, non-differentiable) The scale array along each dimension. + It takes value greater than 0. If it's less than 1, it's sampling down, + otherwise, it's upsampling. The number of elements of 'scales' should be + the same as the rank of input 'X'. One of 'scales' and 'sizes' MUST be + specified and it is an error if both are specified. If 'sizes' is + needed, the user can use an empty string as the name of 'scales' in this + operator's input list. + + sizes: (optional, non-differentiable) The size of the output tensor. The + number of elements of 'sizes' should be the same as the rank of input + 'X'. Only one of 'scales' and 'sizes' can be specified. + + coordinate_transformation_mode: + This attribute describes how to transform + the coordinate in the resized tensor to the coordinate in the original + tensor.
+
+                The coordinate of each dimension is transformed individually.
+                Let's describe a case using axis x as an example. Denote x_resized as
+                the coordinate of axis x in the resized tensor, x_original as the
+                coordinate of axis x in the original tensor, length_original as the
+                length of the original tensor in axis x, length_resized as the length
+                of the resized tensor in axis x, roi_x = (start_x, end_x) of the axis
+                x in input "roi", and scale = length_resized / length_original. Then:
+
+                if coordinate_transformation_mode is "half_pixel":
+                    x_original = (x_resized + 0.5) / scale - 0.5
+
+                if coordinate_transformation_mode is "pytorch_half_pixel":
+                    x_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0
+
+                if coordinate_transformation_mode is "align_corners":
+                    x_original = x_resized * (length_original - 1) / (length_resized - 1)
+
+                if coordinate_transformation_mode is "asymmetric":
+                    x_original = x_resized / scale
+
+                if coordinate_transformation_mode is "tf_crop_and_resize":
+ x_original = length_resized > 1 ? start_x * + (length_original - 1) + x_resized * (end_x - start_x) * (length_original + - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original + - 1). + + cubic_coeff_a: The coefficient 'a' used in cubic interpolation. Two common + choice are -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch). + Check out Equation (4) in https://ieeexplore.ieee.org/document/1163711 + for the details. This attribute is valid only if "mode" is "cubic". + + exclude_outside: If set to 1, the weight of sampling locations outside the + tensor will be set to 0 and the weight will be renormalized so that + their sum is 1.0. The default value is 0. + + extrapolation_value: When coordinate_transformation_mode is + "tf_crop_and_resize" and x_original is outside the range [0, + length_original - 1], this value is used as the corresponding output + value. Default is 0.0f. + + mode: Three interpolation modes: nearest (default), linear and cubic. The + "linear" mode includes linear interpolation for 1D tensor and N-linear + interpolation for N-D tensor (for example, bilinear interpolation for 2D + tensor). The "cubic" mode includes cubic interpolation for 1D tensor and + N-cubic interpolation for N-D tensor (for example, bicubic interpolation + for 2D tensor). + + nearest_mode: Four modes: round_prefer_floor (default, as known as round + half down), round_prefer_ceil (as known as round half up), floor, ceil. + Only used by nearest interpolation. It indicates how to get "nearest" + pixel in input tensor from x_original, so this attribute is valid only + if "mode" is "nearest". + """ + + schema = get_schema("Resize", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Resize", schema) + return op( + *self._prepare_inputs(schema, X, roi, scales, sizes), + coordinate_transformation_mode=coordinate_transformation_mode, + cubic_coeff_a=cubic_coeff_a, + exclude_outside=exclude_outside, + extrapolation_value=extrapolation_value, + mode=mode, + nearest_mode=nearest_mode, + ) + + def ScatterElements( + self, + data: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + indices: Union[INT32, INT64], + updates: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axis: int = 0, + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 ScatterElements(13)](https://onnx.ai/onnx/operators/onnx__ScatterElements.html#scatterelements-13 "Online Documentation") + + + ScatterElements takes three inputs `data`, `updates`, and `indices` of the same + rank r >= 1 and an optional attribute axis that identifies an axis of `data` + (by default, the outer-most axis, that is axis 0). The output of the operation + is produced by creating a copy of the input `data`, and then updating its value + to values specified by `updates` at specific index positions specified by + `indices`. Its output shape is the same as the shape of `data`. 
+ + For each entry in `updates`, the target index in `data` is obtained by combining + the corresponding entry in `indices` with the index of the entry itself: the + index-value for dimension = axis is obtained from the value of the corresponding + entry in `indices` and the index-value for dimension != axis is obtained from the + index of the entry itself. + + For instance, in a 2-D tensor case, the update corresponding to the [i][j] entry + is performed as below: + :: + + output[indices[i][j]][j] = updates[i][j] if axis = 0, + output[i][indices[i][j]] = updates[i][j] if axis = 1, + + + + This operator is the inverse of GatherElements. It is similar to Torch's Scatter operation. + + Example 1: + :: + + data = [ + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + ] + indices = [ + [1, 0, 2], + [0, 2, 1], + ] + updates = [ + [1.0, 1.1, 1.2], + [2.0, 2.1, 2.2], + ] + output = [ + [2.0, 1.1, 0.0] + [1.0, 0.0, 2.2] + [0.0, 2.1, 1.2] + ] + + + Example 2: + :: + + data = [[1.0, 2.0, 3.0, 4.0, 5.0]] + indices = [[1, 3]] + updates = [[1.1, 2.1]] + axis = 1 + output = [[1.0, 1.1, 3.0, 2.1, 5.0]] + + + + + Args: + data: (differentiable) Tensor of rank r >= 1. + + indices: (non-differentiable) Tensor of int32/int64 indices, of r >= 1 (same + rank as input). All index values are expected to be within bounds [-s, + s-1] along axis of size s. It is an error if any of the index values are + out of bounds. + + updates: (differentiable) Tensor of rank r >=1 (same rank and shape as + indices) + + axis: Which axis to scatter on. Negative value means counting dimensions + from the back. Accepted range is [-r, r-1] where r = rank(data). + """ + + schema = get_schema("ScatterElements", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "ScatterElements", schema) + return op(*self._prepare_inputs(schema, data, indices, updates), axis=axis) + + def ScatterND( + self, + data: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + indices: INT64, + updates: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 ScatterND(13)](https://onnx.ai/onnx/operators/onnx__ScatterND.html#scatternd-13 "Online Documentation") + + + ScatterND takes three inputs `data` tensor of rank r >= 1, `indices` tensor of rank q >= 1, + and `updates` tensor of rank q + r - indices.shape[-1] - 1. The output of the operation + is produced by creating a copy of the input `data`, and then updating its value to values + specified by `updates` at specific index positions specified by `indices`. Its output shape + is the same as the shape of `data`. Note that `indices` should not have duplicate entries. + That is, two or more `updates` for the same index-location is not supported. + + `indices` is an integer tensor. Let k denote indices.shape[-1], the last dimension in the shape of `indices`. + `indices` is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into `data`. 
+ Hence, k can be a value at most the rank of `data`. When k equals rank(data), each update entry specifies an + update to a single element of the tensor. When k is less than rank(data) each update entry specifies an + update to a slice of the tensor. Index values are allowed to be negative, as per the usual + convention for counting backwards from the end, but are expected in the valid range. + + `updates` is treated as a (q-1)-dimensional tensor of replacement-slice-values. Thus, the + first (q-1) dimensions of updates.shape must match the first (q-1) dimensions of indices.shape. + The remaining dimensions of `updates` correspond to the dimensions of the + replacement-slice-values. Each replacement-slice-value is a (r-k) dimensional tensor, + corresponding to the trailing (r-k) dimensions of `data`. Thus, the shape of `updates` + must equal indices.shape[0:q-1] ++ data.shape[k:r-1], where ++ denotes the concatenation + of shapes. + + The `output` is calculated via the following equation: + + output = np.copy(data) + update_indices = indices.shape[:-1] + for idx in np.ndindex(update_indices): + output[indices[idx]] = updates[idx] + + The order of iteration in the above loop is not specified. + In particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2]. + This ensures that the output value does not depend on the iteration order. + + This operator is the inverse of GatherND. + + Example 1: + :: + + data = [1, 2, 3, 4, 5, 6, 7, 8] + indices = [[4], [3], [1], [7]] + updates = [9, 10, 11, 12] + output = [1, 11, 3, 10, 9, 6, 7, 12] + + + + Example 2: + :: + + data = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], + [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], + [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]], + [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]] + indices = [[0], [2]] + updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]] + output = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], + [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]], + [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]] + + + + + Args: + data: (differentiable) Tensor of rank r >= 1. + + indices: (non-differentiable) Tensor of rank q >= 1. + + updates: (differentiable) Tensor of rank q + r - indices_shape[-1] - 1. + """ + + schema = get_schema("ScatterND", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "ScatterND", schema) + return op(*self._prepare_inputs(schema, data, indices, updates)) + + def Shape( + self, + data: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> INT64: + r"""[🌐 Shape(13)](https://onnx.ai/onnx/operators/onnx__Shape.html#shape-13 "Online Documentation") + + + Takes a tensor as input and outputs an 1D int64 tensor containing the shape of the input tensor. + + + Args: + data: (non-differentiable) An input tensor. 
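+
+        Example (illustrative only):
+        ::
+
+            data is a tensor of shape [3, 4, 5]
+            output = [3, 4, 5]   (a 1-D INT64 tensor)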
+ """ + + schema = get_schema("Shape", 13, "") + op: Callable[..., INT64] = Op(self, "Shape", schema) + return op(*self._prepare_inputs(schema, data)) + + def Sigmoid( + self, X: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16] + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Sigmoid(13)](https://onnx.ai/onnx/operators/onnx__Sigmoid.html#sigmoid-13 "Online Documentation") + + + Sigmoid takes one input data (Tensor) and produces one output data + (Tensor) where the sigmoid function, y = 1 / (1 + exp(-x)), is applied to the + tensor elementwise. + + + Args: + X: (differentiable) Input tensor + """ + + schema = get_schema("Sigmoid", 13, "") + op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = Op( + self, "Sigmoid", schema + ) + return op(*self._prepare_inputs(schema, X)) + + def Sign( + self, + input: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Sign(13)](https://onnx.ai/onnx/operators/onnx__Sign.html#sign-13 "Online Documentation") + + + Calculate the sign of the given input tensor element-wise. + If input > 0, output 1. if input < 0, output -1. if input == 0, output 0. + + + Args: + input: (non-differentiable) Input tensor + """ + + schema = get_schema("Sign", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Sign", schema) + return op(*self._prepare_inputs(schema, input)) + + def Size( + self, + data: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> INT64: + r"""[🌐 Size(13)](https://onnx.ai/onnx/operators/onnx__Size.html#size-13 "Online Documentation") + + + Takes a tensor as input and outputs a int64 scalar that equals to the total number of elements of the input tensor. + + + Args: + data: (non-differentiable) An input tensor. + """ + + schema = get_schema("Size", 13, "") + op: Callable[..., INT64] = Op(self, "Size", schema) + return op(*self._prepare_inputs(schema, data)) + + def Slice( + self, + data: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + starts: Union[INT32, INT64], + ends: Union[INT32, INT64], + axes: Optional[Union[INT32, INT64]] = None, + steps: Optional[Union[INT32, INT64]] = None, + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Slice(13)](https://onnx.ai/onnx/operators/onnx__Slice.html#slice-13 "Online Documentation") + + + Produces a slice of the input tensor along multiple axes. Similar to numpy: + https://numpy.org/doc/stable/user/basics.indexing.html?highlight=slice#slicing-and-striding + + Slice uses the `starts`, `ends`, `axes` and `steps` inputs to select a sub-tensor + of its input `data` tensor. + + An effective `start[i]`, `end[i]`, and `step[i]` must be computed for each `i` + in `[0, ... r-1]` where `r = rank(input)` as follows: + + If `axes` are omitted, they are set to `[0, ..., r-1]`. 
+        If `steps` are omitted, they are set to `[1, ..., 1]` of length `len(starts)`.
+
+        The effective values are initialized as `start[i] = 0`, `end[i] = dims[i]` where
+        `dims` are the dimensions of `input` and `step[i] = 1`.
+
+        All negative elements of `axes` are made non-negative by adding `r` to them, where
+        `r = rank(input)`.
+
+        All negative values in `starts[i]` and `ends[i]` have `dims[axes[i]]` added to them,
+        where `dims` are the dimensions of `input`. Then `start[axes[i]]` is the adjusted
+        `starts[i]` clamped into the range `[0, dims[axes[i]]]` for positive stepping
+        and `[0, dims[axes[i]]-1]` for negative stepping.
+
+        The clamping for the adjusted `ends[i]` depends on the sign of `steps[i]` and must
+        accommodate copying 0 through `dims[axes[i]]` elements, so for positive stepping
+        `end[axes[i]]` is clamped to `[0, dims[axes[i]]]`, while for negative stepping it
+        is clamped to `[-1, dims[axes[i]]-1]`.
+
+        Finally, `step[axes[i]] = steps[i]`.
+
+        For slicing to the end of a dimension with unknown size, it is recommended to pass
+        in `INT_MAX` when slicing forward and `INT_MIN` when slicing backward.
+
+        Example 1:
+            data = [
+                [1, 2, 3, 4],
+                [5, 6, 7, 8],
+            ]
+            axes = [0, 1]
+            starts = [1, 0]
+            ends = [2, 3]
+            steps = [1, 2]
+            result = [
+                [5, 7],
+            ]
+        Example 2:
+            data = [
+                [1, 2, 3, 4],
+                [5, 6, 7, 8],
+            ]
+            starts = [0, 1]
+            ends = [-1, 1000]
+            result = [
+                [2, 3, 4],
+            ]
+
+
+        Args:
+            data: (differentiable) Tensor of data to extract slices from.
+
+            starts: (non-differentiable) 1-D tensor of starting indices of corresponding
+                axis in `axes`
+
+            ends: (non-differentiable) 1-D tensor of ending indices (exclusive) of
+                corresponding axis in `axes`
+
+            axes: (optional, non-differentiable) 1-D tensor of axes that `starts` and
+                `ends` apply to. Negative value means counting dimensions from the back.
+                Accepted range is [-r, r-1] where r = rank(data). Behavior is undefined
+                if an axis is repeated.
+
+            steps: (optional, non-differentiable) 1-D tensor of slice step of
+                corresponding axis in `axes`. Negative value means slicing backward.
+                'steps' cannot be 0. Defaults to 1s.
+        """
+
+        schema = get_schema("Slice", 13, "")
+        op: Callable[
+            ...,
+            Union[
+                BFLOAT16,
+                BOOL,
+                COMPLEX128,
+                COMPLEX64,
+                DOUBLE,
+                FLOAT,
+                FLOAT16,
+                INT16,
+                INT32,
+                INT64,
+                INT8,
+                STRING,
+                UINT16,
+                UINT32,
+                UINT64,
+                UINT8,
+            ],
+        ] = Op(self, "Slice", schema)
+        return op(*self._prepare_inputs(schema, data, starts, ends, axes, steps))
+
+    def Softmax(
+        self, input: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], axis: int = -1
+    ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]:
+        r"""[🌐 Softmax(13)](https://onnx.ai/onnx/operators/onnx__Softmax.html#softmax-13 "Online Documentation")
+
+
+        The operator computes the normalized exponential values for the given input:
+
+        Softmax(input, axis) = Exp(input) / ReduceSum(Exp(input), axis=axis, keepdims=1)
+
+        The "axis" attribute indicates the dimension along which Softmax
+        will be performed. The output tensor has the same shape
+        and contains the Softmax values of the corresponding input.
+
+
+        Args:
+            input: (differentiable) The input tensor of rank >= axis.
+
+            axis: Describes the dimension Softmax will be performed on. Negative value
+                means counting dimensions from the back. Accepted range is [-r, r-1]
+                where r = rank(input).
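+
+        Example (illustrative only; values rounded to four decimals):
+        ::
+
+            input = [[1.0, 2.0, 3.0]]
+            axis = -1
+            output = [[0.0900, 0.2447, 0.6652]]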
+ """ + + schema = get_schema("Softmax", 13, "") + op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = Op( + self, "Softmax", schema + ) + return op(*self._prepare_inputs(schema, input), axis=axis) + + def SoftmaxCrossEntropyLoss( + self, + scores: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + labels: Union[INT32, INT64], + weights: Optional[Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = None, + ignore_index: Optional[int] = None, + reduction: str = "mean", + ) -> Tuple[ + Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16] + ]: + r"""[🌐 SoftmaxCrossEntropyLoss(13)](https://onnx.ai/onnx/operators/onnx__SoftmaxCrossEntropyLoss.html#softmaxcrossentropyloss-13 "Online Documentation") + + Loss function that measures the softmax cross entropy + between 'scores' and 'labels'. + This operator first computes a loss tensor whose shape is identical to the labels input. + If the input is 2-D with shape (N, C), the loss tensor may be a N-element vector L = (l_1, l_2, ..., l_N). + If the input is N-D tensor with shape (N, C, D1, D2, ..., Dk), + the loss tensor L may have (N, D1, D2, ..., Dk) as its shape and L[i,][j_1][j_2]...[j_k] denotes a scalar element in L. + After L is available, this operator can optionally do a reduction operator. + + shape(scores): (N, C) where C is the number of classes, or (N, C, D1, D2,..., Dk), + with K >= 1 in case of K-dimensional loss. + shape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, D1, D2,..., Dk), + with K >= 1 in case of K-dimensional loss. + + The loss for one sample, l_i, can caculated as follows: + l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk], where i is the index of classes. + or + l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if 'weights' is provided. + + loss is zero for the case when label-value equals ignore_index. + l[i][d1][d2]...[dk] = 0, when labels[n][d1][d2]...[dk] = ignore_index + + where: + p = Softmax(scores) + y = Log(p) + c = labels[i][d1][d2]...[dk] + + Finally, L is optionally reduced: + If reduction = 'none', the output is L with shape (N, D1, D2, ..., Dk). + If reduction = 'sum', the output is scalar: Sum(L). + If reduction = 'mean', the output is scalar: ReduceMean(L), or if weight is provided: ReduceSum(L) / ReduceSum(W), + where tensor W is of shape (N, D1, D2, ..., Dk) and W[n][d1][d2]...[dk] = weights[labels[i][d1][d2]...[dk]]. + + + Args: + scores: (differentiable) The predicted outputs with shape [batch_size, + class_size], or [batch_size, class_size, D1, D2 , ..., Dk], where K is + the number of dimensions. + + labels: (non-differentiable) The ground truth output tensor, with shape + [batch_size], or [batch_size, D1, D2, ..., Dk], where K is the number of + dimensions. Labels element value shall be in range of [0, C). If + ignore_index is specified, it may have a value outside [0, C) and the + label values should either be in the range [0, C) or have the value + ignore_index. + + weights: (optional, non-differentiable) A manual rescaling weight given to + each class. If given, it has to be a 1D Tensor assigning weight to each + of the classes. Otherwise, it is treated as if having all ones. + + ignore_index: Specifies a target value that is ignored and does not + contribute to the input gradient. It's an optional value. + + reduction: Type of reduction to apply to loss: none, sum, mean(default). + 'none': no reduction will be applied, 'sum': the output will be summed. + 'mean': the sum of the output will be divided by the number of elements + in the output. 
+ """ + + schema = get_schema("SoftmaxCrossEntropyLoss", 13, "") + op: Callable[ + ..., + Tuple[ + Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + ], + ] = Op(self, "SoftmaxCrossEntropyLoss", schema) + return op( + *self._prepare_inputs(schema, scores, labels, weights), + ignore_index=ignore_index, + reduction=reduction, + ) + + def SpaceToDepth( + self, + input: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + blocksize: Optional[int] = None, + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 SpaceToDepth(13)](https://onnx.ai/onnx/operators/onnx__SpaceToDepth.html#spacetodepth-13 "Online Documentation") + + SpaceToDepth rearranges blocks of spatial data into depth. More specifically, + this op outputs a copy of the input tensor where values from the height and width dimensions + are moved to the depth dimension. + + + Args: + input: (differentiable) Input tensor of [N,C,H,W], where N is the batch + axis, C is the channel or depth, H is the height and W is the width. + + blocksize: Blocks of [blocksize, blocksize] are moved. + """ + + schema = get_schema("SpaceToDepth", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "SpaceToDepth", schema) + return op(*self._prepare_inputs(schema, input), blocksize=blocksize) + + def Split( + self, + input: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + split: Optional[INT64] = None, + axis: int = 0, + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Split(13)](https://onnx.ai/onnx/operators/onnx__Split.html#split-13 "Online Documentation") + + Split a tensor into a list of tensors, along the specified + 'axis'. Lengths of the parts can be specified using input 'split'. + Otherwise, the tensor is split to equal sized parts. + + + Args: + input: (differentiable) The tensor to split + + split: (optional, non-differentiable) Optional length of each output. Values + should be >= 0.Sum of the values must be equal to the dim value at + 'axis' specified. + + axis: Which axis to split on. A negative value means counting dimensions + from the back. Accepted range is [-rank, rank-1] where r = rank(input). + """ + + schema = get_schema("Split", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Split", schema) + return op(*self._prepare_inputs(schema, input, split), axis=axis) + + def Sqrt( + self, X: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16] + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Sqrt(13)](https://onnx.ai/onnx/operators/onnx__Sqrt.html#sqrt-13 "Online Documentation") + + + Square root takes one input data (Tensor) and produces one output data + (Tensor) where the square root is, y = x^0.5, is applied to + the tensor elementwise. 
If x is negative, then it will return NaN. + + + Args: + X: (differentiable) Input tensor + """ + + schema = get_schema("Sqrt", 13, "") + op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = Op(self, "Sqrt", schema) + return op(*self._prepare_inputs(schema, X)) + + def Squeeze( + self, + data: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axes: Optional[INT64] = None, + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Squeeze(13)](https://onnx.ai/onnx/operators/onnx__Squeeze.html#squeeze-13 "Online Documentation") + + + Remove single-dimensional entries from the shape of a tensor. + Takes an input `axes` with a list of axes to squeeze. + If `axes` is not provided, all the single dimensions will be removed from + the shape. If an axis is selected with shape entry not equal to one, an error is raised. + + + Args: + data: (differentiable) Tensors with at least max(dims) dimensions. + + axes: (optional, non-differentiable) List of integers indicating the + dimensions to squeeze. Negative value means counting dimensions from the + back. Accepted range is [-r, r-1] where r = rank(data). + """ + + schema = get_schema("Squeeze", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Squeeze", schema) + return op(*self._prepare_inputs(schema, data, axes)) + + def Sub( + self, + A: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + B: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 Sub(13)](https://onnx.ai/onnx/operators/onnx__Sub.html#sub-13 "Online Documentation") + + + Performs element-wise binary subtraction (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: (differentiable) First operand. + + B: (differentiable) Second operand. + """ + + schema = get_schema("Sub", 13, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "Sub", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def Sum( + self, *data_0: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16] + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Sum(13)](https://onnx.ai/onnx/operators/onnx__Sum.html#sum-13 "Online Documentation") + + + Element-wise sum of each of the input tensors (with Numpy-style broadcasting support). + All inputs and outputs must have the same data type. + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + data_0: (variadic, differentiable) List of tensors for sum. 
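+
+        Example (illustrative only, including broadcasting):
+        ::
+
+            Sum([1, 2], [3, 4], [5, 6]) = [9, 12]
+            Sum([[1, 2], [3, 4]], [10, 20]) = [[11, 22], [13, 24]]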
+ """ + + schema = get_schema("Sum", 13, "") + op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = Op(self, "Sum", schema) + return op(*self._prepare_inputs(schema, *data_0)) + + def Tanh( + self, input: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16] + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Tanh(13)](https://onnx.ai/onnx/operators/onnx__Tanh.html#tanh-13 "Online Documentation") + + + Calculates the hyperbolic tangent of the given input tensor element-wise. + + + Args: + input: (differentiable) Input tensor + """ + + schema = get_schema("Tanh", 13, "") + op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = Op(self, "Tanh", schema) + return op(*self._prepare_inputs(schema, input)) + + def Tile( + self, + input: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + repeats: INT64, + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Tile(13)](https://onnx.ai/onnx/operators/onnx__Tile.html#tile-13 "Online Documentation") + + Constructs a tensor by tiling a given tensor. + This is the same as function `tile` in Numpy, but no broadcast. + For example A = [[1, 2], [3, 4]], B = [1, 2], tile(A, B) = [[1, 2, 1, 2], [3, 4, 3, 4]] + + + Args: + input: (differentiable) Input tensor of any shape. + + repeats: (non-differentiable) 1D int64 tensor of the same length as input's + dimension number, includes numbers of repeated copies along input's + dimensions. + """ + + schema = get_schema("Tile", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Tile", schema) + return op(*self._prepare_inputs(schema, input, repeats)) + + def Transpose( + self, + data: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + perm: Optional[Sequence[int]] = None, + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Transpose(13)](https://onnx.ai/onnx/operators/onnx__Transpose.html#transpose-13 "Online Documentation") + + + Transpose the input tensor similar to numpy.transpose. For example, when + perm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output shape + will be (2, 1, 3). + + + Args: + data: (differentiable) An input tensor. + + perm: A list of integers. By default, reverse the dimensions, otherwise + permute the axes according to the values given. 
+ """ + + schema = get_schema("Transpose", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Transpose", schema) + return op(*self._prepare_inputs(schema, data), perm=perm) + + def Unsqueeze( + self, + data: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axes: INT64, + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Unsqueeze(13)](https://onnx.ai/onnx/operators/onnx__Unsqueeze.html#unsqueeze-13 "Online Documentation") + + + Insert single-dimensional entries to the shape of an input tensor (`data`). + Takes one required input `axes` - which contains a list of dimension indices and this operator will insert a dimension of value `1` into the corresponding index of the output tensor (`expanded`). + + For example: + Given an input tensor (`data`) of shape [3, 4, 5], then + Unsqueeze(data, axes=[0, 4]) outputs a tensor (`expanded`) containing same data as `data` but with shape [1, 3, 4, 5, 1]. + + The input `axes` should not contain any duplicate entries. It is an error if it contains duplicates. + The rank of the output tensor (`output_rank`) is the rank of the input tensor (`data`) plus the number of values in `axes`. + Each value in `axes` should be within the (inclusive) range [-output_rank , output_rank - 1]. + The order of values in `axes` does not matter and can come in any order. + + + + Args: + data: (differentiable) Original tensor + + axes: (non-differentiable) List of integers indicating the dimensions to be + inserted. Negative value means counting dimensions from the back. + Accepted range is [-r, r-1] where r = rank(expanded). + """ + + schema = get_schema("Unsqueeze", 13, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Unsqueeze", schema) + return op(*self._prepare_inputs(schema, data, axes)) diff --git a/onnxscript/onnx_opset/_impl/opset14.py b/onnxscript/onnx_opset/_impl/opset14.py new file mode 100644 index 0000000000..30ace01d7a --- /dev/null +++ b/onnxscript/onnx_opset/_impl/opset14.py @@ -0,0 +1,1399 @@ +# -------------------------------------------------------------------------- +# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ +# ⚙️ Generated by 'python -m opgen' +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+# -------------------------------------------------------------------------- +# flake8: noqa +# mypy: disable-error-code=override +# pylint: disable=W0221,W0222,W0237,W0246,R0901 +# -------------------------------------------------------------------------- + +from typing import Callable, Optional, Sequence, Tuple, Union + +from onnx.defs import get_schema + +from onnxscript.onnx_opset._impl.opset13 import Opset13 +from onnxscript.onnx_types import ( + BFLOAT16, + BOOL, + COMPLEX64, + COMPLEX128, + DOUBLE, + FLOAT, + FLOAT16, + INT8, + INT16, + INT32, + INT64, + STRING, + UINT8, + UINT16, + UINT32, + UINT64, +) +from onnxscript.values import Op, Opset + + +class Opset14(Opset13): + def __new__(cls): + return Opset.__new__(cls, "", 14) + + def __init__(self): + super().__init__() + + def Add( + self, + A: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + B: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Add(14)](https://onnx.ai/onnx/operators/onnx__Add.html#add-14 "Online Documentation") + + + Performs element-wise binary addition (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + (Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16. + + + Args: + A: (differentiable) First operand. + + B: (differentiable) Second operand. + """ + + schema = get_schema("Add", 14, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Add", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def BatchNormalization( + self, + X: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + scale: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + B: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + input_mean: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + input_var: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + epsilon: float = 9.999999747378752e-06, + momentum: float = 0.8999999761581421, + training_mode: int = 0, + ) -> Tuple[ + Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + ]: + r"""[🌐 BatchNormalization(14)](https://onnx.ai/onnx/operators/onnx__BatchNormalization.html#batchnormalization-14 "Online Documentation") + + + Carries out batch normalization as described in the paper + https://arxiv.org/abs/1502.03167. Depending on the mode it is being run, + There are five required inputs 'X', 'scale', 'B', 'input_mean' and + 'input_var'. + Note that 'input_mean' and 'input_var' are expected to be the estimated + statistics in inference mode (training_mode=False, default), + and the running statistics in training mode (training_mode=True). + There are multiple cases for the number of outputs, which we list below: + + Output case #1: Y, running_mean, running_var (training_mode=True) + Output case #2: Y (training_mode=False) + + When training_mode=False, extra outputs are invalid. 
+        The outputs are updated as follows when training_mode=True:
+        ::
+
+            running_mean = input_mean * momentum + current_mean * (1 - momentum)
+            running_var = input_var * momentum + current_var * (1 - momentum)
+
+            Y = (X - current_mean) / sqrt(current_var + epsilon) * scale + B
+
+            where:
+
+            current_mean = ReduceMean(X, axis=all_except_channel_index)
+            current_var = ReduceVar(X, axis=all_except_channel_index)
+
+            Notice that ReduceVar refers to the population variance, and it equals
+            sum(sqrd(x_i - x_avg)) / N
+            where N is the population size (this formula does not use sample size N - 1).
+
+
+        When training_mode=False:
+        ::
+
+            Y = (X - input_mean) / sqrt(input_var + epsilon) * scale + B
+
+
+        For previous (deprecated) non-spatial cases, implementors are advised
+        to flatten the input shape to (N x C * D1 * D2 * ... * Dn) before a BatchNormalization Op.
+        This operator has **optional** inputs/outputs. See `ONNX <https://github.com/onnx/onnx/blob/main/docs/IR.md>`_ for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.
+
+
+        Args:
+            X: (differentiable) Input data tensor from the previous operator; dimensions
+                are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size,
+                C is the number of channels. Statistics are computed for every channel
+                of C over N and D1 to Dn dimensions. For image data, input dimensions
+                become (N x C x H x W). The op also accepts single dimension input of
+                size N in which case C is assumed to be 1
+
+            scale: (differentiable) Scale tensor of shape (C).
+
+            B: (differentiable) Bias tensor of shape (C).
+
+            input_mean: (differentiable) running (training) or estimated (testing) mean
+                tensor of shape (C).
+
+            input_var: (differentiable) running (training) or estimated (testing)
+                variance tensor of shape (C).
+
+            epsilon: The epsilon value to use to avoid division by zero.
+
+            momentum: Factor used in computing the running mean and variance, e.g.,
+                running_mean = running_mean * momentum + mean * (1 - momentum).
+
+            training_mode: If set to true, it indicates BatchNormalization is being used
+                for training, and outputs 1, 2, and 3 (Y, running_mean, running_var)
+                would be populated.
+        """
+
+        schema = get_schema("BatchNormalization", 14, "")
+        op: Callable[
+            ...,
+            Tuple[
+                Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16],
+                Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16],
+                Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16],
+            ],
+        ] = Op(self, "BatchNormalization", schema)
+        return op(
+            *self._prepare_inputs(schema, X, scale, B, input_mean, input_var),
+            epsilon=epsilon,
+            momentum=momentum,
+            training_mode=training_mode,
+        )
+
+    def CumSum(
+        self,
+        x: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+        axis: Union[INT32, INT64],
+        exclusive: int = 0,
+        reverse: int = 0,
+    ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]:
+        r"""[🌐 CumSum(14)](https://onnx.ai/onnx/operators/onnx__CumSum.html#cumsum-14 "Online Documentation")
+
+
+        Performs cumulative sum of the input elements along the given axis.
+        By default, it will do the sum inclusively meaning the first element is copied as is.
+        Through an `exclusive` attribute, this behavior can change to exclude the first element.
+        It can also perform summation in the opposite direction of the axis. For that, set `reverse` attribute to 1.
+ + Example: + :: + + input_x = [1, 2, 3] + axis=0 + output = [1, 3, 6] + exclusive=1 + output = [0, 1, 3] + exclusive=0 + reverse=1 + output = [6, 5, 3] + exclusive=1 + reverse=1 + output = [5, 3, 0] + + + + + Args: + x: (differentiable) An input tensor that is to be processed. + + axis: (non-differentiable) A 0-D tensor. Must be in the range [-rank(x), + rank(x)-1]. Negative value means counting dimensions from the back. + + exclusive: If set to 1 will return exclusive sum in which the top element is + not included. In other terms, if set to 1, the j-th output element would + be the sum of the first (j-1) elements. Otherwise, it would be the sum + of the first j elements. + + reverse: If set to 1 will perform the sums in reverse direction. + """ + + schema = get_schema("CumSum", 14, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "CumSum", schema) + return op(*self._prepare_inputs(schema, x, axis), exclusive=exclusive, reverse=reverse) + + def Div( + self, + A: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + B: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Div(14)](https://onnx.ai/onnx/operators/onnx__Div.html#div-14 "Online Documentation") + + + Performs element-wise binary division (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + (Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16. + + + Args: + A: (differentiable) First operand. + + B: (differentiable) Second operand. + """ + + schema = get_schema("Div", 14, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Div", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def GRU( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + W: Union[DOUBLE, FLOAT, FLOAT16], + R: Union[DOUBLE, FLOAT, FLOAT16], + B: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + sequence_lens: Optional[INT32] = None, + initial_h: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + activation_alpha: Optional[Sequence[float]] = None, + activation_beta: Optional[Sequence[float]] = None, + activations: Optional[Sequence[str]] = None, + clip: Optional[float] = None, + direction: str = "forward", + hidden_size: Optional[int] = None, + layout: int = 0, + linear_before_reset: int = 0, + ) -> Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]]: + r"""[🌐 GRU(14)](https://onnx.ai/onnx/operators/onnx__GRU.html#gru-14 "Online Documentation") + + + Computes an one-layer GRU. This operator is usually supported via some custom + implementation such as CuDNN. 
+ + Notations: + + `X` - input tensor + + `z` - update gate + + `r` - reset gate + + `h` - hidden gate + + `t` - time step (t-1 means previous time step) + + `W[zrh]` - W parameter weight matrix for update, reset, and hidden gates + + `R[zrh]` - R recurrence weight matrix for update, reset, and hidden gates + + `Wb[zrh]` - W bias vectors for update, reset, and hidden gates + + `Rb[zrh]` - R bias vectors for update, reset, and hidden gates + + `WB[zrh]` - W parameter weight matrix for backward update, reset, and hidden gates + + `RB[zrh]` - R recurrence weight matrix for backward update, reset, and hidden gates + + `WBb[zrh]` - W bias vectors for backward update, reset, and hidden gates + + `RBb[zrh]` - R bias vectors for backward update, reset, and hidden gates + + `H` - Hidden state + + `num_directions` - 2 if direction == bidirectional else 1 + + Activation functions: + + Relu(x) - max(0, x) + + Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) + + Sigmoid(x) - 1/(1 + e^{-x}) + + (NOTE: Below are optional) + + Affine(x) - alpha*x + beta + + LeakyRelu(x) - x if x >= 0 else alpha * x + + ThresholdedRelu(x) - x if x >= alpha else 0 + + ScaledTanh(x) - alpha*Tanh(beta*x) + + HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) + + Elu(x) - x if x >= 0 else alpha*(e^x - 1) + + Softsign(x) - x/(1 + |x|) + + Softplus(x) - log(1 + e^x) + + Equations (Default: f=Sigmoid, g=Tanh): + + - zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz) + + - rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr) + + - ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset = 0 + + - ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0 + + - Ht = (1 - zt) (.) ht + zt (.) Ht-1 + This operator has **optional** inputs/outputs. See `ONNX `_ for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. + + + Args: + X: (differentiable) The input sequences packed (and potentially padded) into + one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`. + + W: (differentiable) The weight tensor for the gates. Concatenation of + `W[zrh]` and `WB[zrh]` (if bidirectional) along dimension 0. This tensor + has shape `[num_directions, 3*hidden_size, input_size]`. + + R: (differentiable) The recurrence weight tensor. Concatenation of `R[zrh]` + and `RB[zrh]` (if bidirectional) along dimension 0. This tensor has + shape `[num_directions, 3*hidden_size, hidden_size]`. + + B: (optional, differentiable) The bias tensor for the gates. Concatenation + of `[Wb[zrh], Rb[zrh]]` and `[WBb[zrh], RBb[zrh]]` (if bidirectional) + along dimension 0. This tensor has shape `[num_directions, + 6*hidden_size]`. Optional: If not specified - assumed to be 0 + + sequence_lens: (optional, non-differentiable) Optional tensor specifying + lengths of the sequences in a batch. If not specified - assumed all + sequences in the batch to have length `seq_length`. It has shape + `[batch_size]`. + + initial_h: (optional, non-differentiable) Optional initial value of the + hidden. If not specified - assumed to be 0. It has shape + `[num_directions, batch_size, hidden_size]`. + + activation_alpha: Optional scaling values used by some activation functions. + The values are consumed in the order of activation functions, for + example (f, g, h) in LSTM. 
Default values are the same as of + corresponding ONNX operators.For example with LeakyRelu, the default + alpha is 0.01. + + activation_beta: Optional scaling values used by some activation functions. + The values are consumed in the order of activation functions, for + example (f, g, h) in LSTM. Default values are the same as of + corresponding ONNX operators. + + activations: A list of 2 (or 4 if bidirectional) activation functions for + update, reset, and hidden gates. The activation functions must be one of + the activation functions specified above. Optional: See the equations + for default if not specified. + + clip: Cell clip threshold. Clipping bounds the elements of a tensor in the + range of [-threshold, +threshold] and is applied to the input of + activations. No clip if not specified. + + direction: Specify if the RNN is forward, reverse, or bidirectional. Must be + one of forward (default), reverse, or bidirectional. + + hidden_size: Number of neurons in the hidden layer + + layout: The shape format of inputs X, initial_h and outputs Y, Y_h. If 0, + the following shapes are expected: X.shape = [seq_length, batch_size, + input_size], Y.shape = [seq_length, num_directions, batch_size, + hidden_size], initial_h.shape = Y_h.shape = [num_directions, batch_size, + hidden_size]. If 1, the following shapes are expected: X.shape = + [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, + num_directions, hidden_size], initial_h.shape = Y_h.shape = [batch_size, + num_directions, hidden_size]. + + linear_before_reset: When computing the output of the hidden gate, apply the + linear transformation before multiplying by the output of the reset + gate. + """ + + schema = get_schema("GRU", 14, "") + op: Callable[ + ..., Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]] + ] = Op(self, "GRU", schema) + return op( + *self._prepare_inputs(schema, X, W, R, B, sequence_lens, initial_h), + activation_alpha=activation_alpha, + activation_beta=activation_beta, + activations=activations, + clip=clip, + direction=direction, + hidden_size=hidden_size, + layout=layout, + linear_before_reset=linear_before_reset, + ) + + def HardSwish(self, X: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 HardSwish(14)](https://onnx.ai/onnx/operators/onnx__HardSwish.html#hardswish-14 "Online Documentation") + + + HardSwish takes one input data (Tensor) and produces one output data (Tensor) where + the HardSwish function, y = x * max(0, min(1, alpha * x + beta)) = x * HardSigmoid(x), + where alpha = 1/6 and beta = 0.5, is applied to the tensor elementwise. 
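+
+        Example (illustrative only; values rounded to four decimals):
+        ::
+
+            X = [-4.0, -2.0, 0.0, 2.0, 4.0]
+            Y = [0.0, -0.3333, 0.0, 1.6667, 4.0]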
+ + + Args: + X: (differentiable) Input tensor + """ + + schema = get_schema("HardSwish", 14, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "HardSwish", schema) + return op(*self._prepare_inputs(schema, X)) + + def Identity( + self, + input: Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Identity(14)](https://onnx.ai/onnx/operators/onnx__Identity.html#identity-14 "Online Documentation") + + Identity operator + + Args: + input: (differentiable) Input tensor + """ + + schema = get_schema("Identity", 14, "") + op: Callable[ + ..., + Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Identity", schema) + return op(*self._prepare_inputs(schema, input)) + + def LSTM( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + W: Union[DOUBLE, FLOAT, FLOAT16], + R: Union[DOUBLE, FLOAT, FLOAT16], + B: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + sequence_lens: Optional[INT32] = None, + initial_h: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + initial_c: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + P: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + activation_alpha: Optional[Sequence[float]] = None, + activation_beta: Optional[Sequence[float]] = None, + activations: Optional[Sequence[str]] = None, + clip: Optional[float] = None, + direction: str = "forward", + hidden_size: Optional[int] = None, + input_forget: int = 0, + layout: int = 0, + ) -> Tuple[ + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + ]: + r"""[🌐 LSTM(14)](https://onnx.ai/onnx/operators/onnx__LSTM.html#lstm-14 "Online Documentation") + + + Computes an one-layer LSTM. This operator is usually supported via some + custom implementation such as CuDNN. 
+ + Notations: + + `X` - input tensor + + `i` - input gate + + `o` - output gate + + `f` - forget gate + + `c` - cell gate + + `t` - time step (t-1 means previous time step) + + `W[iofc]` - W parameter weight matrix for input, output, forget, and cell gates + + `R[iofc]` - R recurrence weight matrix for input, output, forget, and cell gates + + `Wb[iofc]` - W bias vectors for input, output, forget, and cell gates + + `Rb[iofc]` - R bias vectors for input, output, forget, and cell gates + + `P[iof]` - P peephole weight vector for input, output, and forget gates + + `WB[iofc]` - W parameter weight matrix for backward input, output, forget, and cell gates + + `RB[iofc]` - R recurrence weight matrix for backward input, output, forget, and cell gates + + `WBb[iofc]` - W bias vectors for backward input, output, forget, and cell gates + + `RBb[iofc]` - R bias vectors for backward input, output, forget, and cell gates + + `PB[iof]` - P peephole weight vector for backward input, output, and forget gates + + `H` - Hidden state + + `num_directions` - 2 if direction == bidirectional else 1 + + Activation functions: + + Relu(x) - max(0, x) + + Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) + + Sigmoid(x) - 1/(1 + e^{-x}) + + (NOTE: Below are optional) + + Affine(x) - alpha*x + beta + + LeakyRelu(x) - x if x >= 0 else alpha * x + + ThresholdedRelu(x) - x if x >= alpha else 0 + + ScaledTanh(x) - alpha*Tanh(beta*x) + + HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) + + Elu(x) - x if x >= 0 else alpha*(e^x - 1) + + Softsign(x) - x/(1 + |x|) + + Softplus(x) - log(1 + e^x) + + Equations (Default: f=Sigmoid, g=Tanh, h=Tanh): + + - it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi) + + - ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf) + + - ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) + + - Ct = ft (.) Ct-1 + it (.) ct + + - ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo) + + - Ht = ot (.) h(Ct) + This operator has **optional** inputs/outputs. See `ONNX `_ for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. + + + Args: + X: (differentiable) The input sequences packed (and potentially padded) into + one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`. + + W: (differentiable) The weight tensor for the gates. Concatenation of + `W[iofc]` and `WB[iofc]` (if bidirectional) along dimension 0. The + tensor has shape `[num_directions, 4*hidden_size, input_size]`. + + R: (differentiable) The recurrence weight tensor. Concatenation of `R[iofc]` + and `RB[iofc]` (if bidirectional) along dimension 0. This tensor has + shape `[num_directions, 4*hidden_size, hidden_size]`. + + B: (optional, differentiable) The bias tensor for input gate. Concatenation + of `[Wb[iofc], Rb[iofc]]`, and `[WBb[iofc], RBb[iofc]]` (if + bidirectional) along dimension 0. This tensor has shape + `[num_directions, 8*hidden_size]`. Optional: If not specified - assumed + to be 0. + + sequence_lens: (optional, non-differentiable) Optional tensor specifying + lengths of the sequences in a batch. If not specified - assumed all + sequences in the batch to have length `seq_length`. It has shape + `[batch_size]`. + + initial_h: (optional, non-differentiable) Optional initial value of the + hidden. If not specified - assumed to be 0. It has shape + `[num_directions, batch_size, hidden_size]`. 
+
+            initial_c: (optional, non-differentiable) Optional initial value of the
+                cell. If not specified - assumed to be 0. It has shape `[num_directions,
+                batch_size, hidden_size]`.
+
+            P: (optional, differentiable) The weight tensor for peepholes. Concatenation
+                of `P[iof]` and `PB[iof]` (if bidirectional) along dimension 0. It has
+                shape `[num_directions, 3*hidden_size]`. Optional: If not specified -
+                assumed to be 0.
+
+            activation_alpha: Optional scaling values used by some activation functions.
+                The values are consumed in the order of activation functions, for
+                example (f, g, h) in LSTM. Default values are the same as of
+                corresponding ONNX operators. For example with LeakyRelu, the default
+                alpha is 0.01.
+
+            activation_beta: Optional scaling values used by some activation functions.
+                The values are consumed in the order of activation functions, for
+                example (f, g, h) in LSTM. Default values are the same as of
+                corresponding ONNX operators.
+
+            activations: A list of 3 (or 6 if bidirectional) activation functions for
+                input, output, forget, cell, and hidden. The activation functions must
+                be one of the activation functions specified above. Optional: See the
+                equations for default if not specified.
+
+            clip: Cell clip threshold. Clipping bounds the elements of a tensor in the
+                range of [-threshold, +threshold] and is applied to the input of
+                activations. No clip if not specified.
+
+            direction: Specify if the RNN is forward, reverse, or bidirectional. Must be
+                one of forward (default), reverse, or bidirectional.
+
+            hidden_size: Number of neurons in the hidden layer
+
+            input_forget: Couple the input and forget gates if 1.
+
+            layout: The shape format of inputs X, initial_h, initial_c and outputs Y,
+                Y_h, Y_c. If 0, the following shapes are expected: X.shape =
+                [seq_length, batch_size, input_size], Y.shape = [seq_length,
+                num_directions, batch_size, hidden_size], initial_h.shape = Y_h.shape =
+                initial_c.shape = Y_c.shape = [num_directions, batch_size, hidden_size].
+                If 1, the following shapes are expected: X.shape = [batch_size,
+                seq_length, input_size], Y.shape = [batch_size, seq_length,
+                num_directions, hidden_size], initial_h.shape = Y_h.shape =
+                initial_c.shape = Y_c.shape = [batch_size, num_directions, hidden_size].
+        """
+
+        schema = get_schema("LSTM", 14, "")
+        op: Callable[
+            ...,
+            Tuple[
+                Union[DOUBLE, FLOAT, FLOAT16],
+                Union[DOUBLE, FLOAT, FLOAT16],
+                Union[DOUBLE, FLOAT, FLOAT16],
+            ],
+        ] = Op(self, "LSTM", schema)
+        return op(
+            *self._prepare_inputs(schema, X, W, R, B, sequence_lens, initial_h, initial_c, P),
+            activation_alpha=activation_alpha,
+            activation_beta=activation_beta,
+            activations=activations,
+            clip=clip,
+            direction=direction,
+            hidden_size=hidden_size,
+            input_forget=input_forget,
+            layout=layout,
+        )
+
+    def Mul(
+        self,
+        A: Union[
+            BFLOAT16,
+            DOUBLE,
+            FLOAT,
+            FLOAT16,
+            INT16,
+            INT32,
+            INT64,
+            INT8,
+            UINT16,
+            UINT32,
+            UINT64,
+            UINT8,
+        ],
+        B: Union[
+            BFLOAT16,
+            DOUBLE,
+            FLOAT,
+            FLOAT16,
+            INT16,
+            INT32,
+            INT64,
+            INT8,
+            UINT16,
+            UINT32,
+            UINT64,
+            UINT8,
+        ],
+    ) -> Union[
+        BFLOAT16,
+        DOUBLE,
+        FLOAT,
+        FLOAT16,
+        INT16,
+        INT32,
+        INT64,
+        INT8,
+        UINT16,
+        UINT32,
+        UINT64,
+        UINT8,
+    ]:
+        r"""[🌐 Mul(14)](https://onnx.ai/onnx/operators/onnx__Mul.html#mul-14 "Online Documentation")
+
+
+        Performs element-wise binary multiplication (with Numpy-style broadcasting support).
+
+        This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_.
+ + (Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16. + + + Args: + A: (differentiable) First operand. + + B: (differentiable) Second operand. + """ + + schema = get_schema("Mul", 14, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Mul", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def RNN( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + W: Union[DOUBLE, FLOAT, FLOAT16], + R: Union[DOUBLE, FLOAT, FLOAT16], + B: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + sequence_lens: Optional[INT32] = None, + initial_h: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + activation_alpha: Optional[Sequence[float]] = None, + activation_beta: Optional[Sequence[float]] = None, + activations: Sequence[str] = ("Tanh", "Tanh"), + clip: Optional[float] = None, + direction: str = "forward", + hidden_size: Optional[int] = None, + layout: int = 0, + ) -> Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]]: + r"""[🌐 RNN(14)](https://onnx.ai/onnx/operators/onnx__RNN.html#rnn-14 "Online Documentation") + + + Computes an one-layer simple RNN. This operator is usually supported + via some custom implementation such as CuDNN. + + Notations: + + `X` - input tensor + + `i` - input gate + + `t` - time step (t-1 means previous time step) + + `Wi` - W parameter weight matrix for input gate + + `Ri` - R recurrence weight matrix for input gate + + `Wbi` - W parameter bias vector for input gate + + `Rbi` - R parameter bias vector for input gate + + `WBi` - W parameter weight matrix for backward input gate + + `RBi` - R recurrence weight matrix for backward input gate + + `WBbi` - WR bias vectors for backward input gate + + `RBbi` - RR bias vectors for backward input gate + + `H` - Hidden state + + `num_directions` - 2 if direction == bidirectional else 1 + + Activation functions: + + Relu(x) - max(0, x) + + Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) + + Sigmoid(x) - 1/(1 + e^{-x}) + + (NOTE: Below are optional) + + Affine(x) - alpha*x + beta + + LeakyRelu(x) - x if x >= 0 else alpha * x + + ThresholdedRelu(x) - x if x >= alpha else 0 + + ScaledTanh(x) - alpha*Tanh(beta*x) + + HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) + + Elu(x) - x if x >= 0 else alpha*(e^x - 1) + + Softsign(x) - x/(1 + |x|) + + Softplus(x) - log(1 + e^x) + + Equations (Default: f=Tanh): + + - Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi) + This operator has **optional** inputs/outputs. See `ONNX `_ for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. + + + Args: + X: (differentiable) The input sequences packed (and potentially padded) into + one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`. + + W: (differentiable) The weight tensor for input gate. Concatenation of `Wi` + and `WBi` (if bidirectional). The tensor has shape `[num_directions, + hidden_size, input_size]`. + + R: (differentiable) The recurrence weight tensor. Concatenation of `Ri` and + `RBi` (if bidirectional). The tensor has shape `[num_directions, + hidden_size, hidden_size]`. + + B: (optional, differentiable) The bias tensor for input gate. Concatenation + of `[Wbi, Rbi]` and `[WBbi, RBbi]` (if bidirectional). 
The tensor has
+                shape `[num_directions, 2*hidden_size]`. Optional: If not specified -
+                assumed to be 0.
+
+            sequence_lens: (optional, non-differentiable) Optional tensor specifying
+                lengths of the sequences in a batch. If not specified - assumed all
+                sequences in the batch to have length `seq_length`. It has shape
+                `[batch_size]`.
+
+            initial_h: (optional, non-differentiable) Optional initial value of the
+                hidden. If not specified - assumed to be 0. It has shape
+                `[num_directions, batch_size, hidden_size]`.
+
+            activation_alpha: Optional scaling values used by some activation functions.
+                The values are consumed in the order of activation functions, for
+                example (f, g, h) in LSTM. Default values are the same as of
+                corresponding ONNX operators. For example with LeakyRelu, the default
+                alpha is 0.01.
+
+            activation_beta: Optional scaling values used by some activation functions.
+                The values are consumed in the order of activation functions, for
+                example (f, g, h) in LSTM. Default values are the same as of
+                corresponding ONNX operators.
+
+            activations: One (or two if bidirectional) activation function for input
+                gate. The activation function must be one of the activation functions
+                specified above. Optional: Default `Tanh` if not specified.
+
+            clip: Cell clip threshold. Clipping bounds the elements of a tensor in the
+                range of [-threshold, +threshold] and is applied to the input of
+                activations. No clip if not specified.
+
+            direction: Specify if the RNN is forward, reverse, or bidirectional. Must be
+                one of forward (default), reverse, or bidirectional.
+
+            hidden_size: Number of neurons in the hidden layer
+
+            layout: The shape format of inputs X, initial_h and outputs Y, Y_h. If 0,
+                the following shapes are expected: X.shape = [seq_length, batch_size,
+                input_size], Y.shape = [seq_length, num_directions, batch_size,
+                hidden_size], initial_h.shape = Y_h.shape = [num_directions, batch_size,
+                hidden_size]. If 1, the following shapes are expected: X.shape =
+                [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length,
+                num_directions, hidden_size], initial_h.shape = Y_h.shape = [batch_size,
+                num_directions, hidden_size].
+        """
+
+        schema = get_schema("RNN", 14, "")
+        op: Callable[
+            ..., Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]]
+        ] = Op(self, "RNN", schema)
+        return op(
+            *self._prepare_inputs(schema, X, W, R, B, sequence_lens, initial_h),
+            activation_alpha=activation_alpha,
+            activation_beta=activation_beta,
+            activations=activations,
+            clip=clip,
+            direction=direction,
+            hidden_size=hidden_size,
+            layout=layout,
+        )
+
+    def Relu(
+        self, X: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8]
+    ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8]:
+        r"""[🌐 Relu(14)](https://onnx.ai/onnx/operators/onnx__Relu.html#relu-14 "Online Documentation")
+
+
+        Relu takes one input data (Tensor) and produces one output data
+        (Tensor) where the rectified linear function, y = max(0, x), is applied to
+        the tensor elementwise.
+ + + Args: + X: (differentiable) Input tensor + """ + + schema = get_schema("Relu", 14, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8] + ] = Op(self, "Relu", schema) + return op(*self._prepare_inputs(schema, X)) + + def Reshape( + self, + data: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + shape: INT64, + allowzero: int = 0, + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Reshape(14)](https://onnx.ai/onnx/operators/onnx__Reshape.html#reshape-14 "Online Documentation") + + + Reshape the input tensor similar to numpy.reshape. + First input is the data tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor. + At most one dimension of the new shape can be -1. In this case, the value is + inferred from the size of the tensor and the remaining dimensions. A dimension + could also be 0, in which case the actual dimension value is unchanged (i.e. taken + from the input tensor). If 'allowzero' is set, and the new shape includes 0, the + dimension will be set explicitly to zero (i.e. not taken from input tensor). + Shape (second input) could be an empty shape, which means converting to a scalar. + The input tensor's shape and the output tensor's shape are required to have the same number of elements. + + If the attribute 'allowzero' is set, it is invalid for the specified shape to + contain both a zero value and -1, as the value of the dimension corresponding + to -1 cannot be determined uniquely. + + + Args: + data: (differentiable) An input tensor. + + shape: (non-differentiable) Specified shape for output. + + allowzero: (Optional) By default, when any value in the 'shape' input is + equal to zero the corresponding dimension value is copied from the input + tensor dynamically. allowzero=1 indicates that if any value in the + 'shape' input is set to zero, the zero value is honored, similar to + NumPy. + """ + + schema = get_schema("Reshape", 14, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Reshape", schema) + return op(*self._prepare_inputs(schema, data, shape), allowzero=allowzero) + + def Sub( + self, + A: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + B: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Sub(14)](https://onnx.ai/onnx/operators/onnx__Sub.html#sub-14 "Online Documentation") + + + Performs element-wise binary subtraction (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + (Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16. + + + Args: + A: (differentiable) First operand. + + B: (differentiable) Second operand. 
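+
+        A minimal broadcasting sketch (the `script` decorator and the names
+        below are illustrative assumptions, not part of this schema):
+        ::
+
+            from onnxscript import FLOAT, script
+            from onnxscript.onnx_opset import opset14 as op
+
+            @script()
+            def center(A: FLOAT[3, 4], B: FLOAT[4]) -> FLOAT[3, 4]:
+                # B is broadcast across the first axis of A
+                return op.Sub(A, B)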
+ """ + + schema = get_schema("Sub", 14, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Sub", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def Trilu( + self, + input: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + k: Optional[INT64] = None, + upper: int = 1, + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Trilu(14)](https://onnx.ai/onnx/operators/onnx__Trilu.html#trilu-14 "Online Documentation") + + + Given a 2-D matrix or batches of 2-D matrices, returns the upper or lower triangular part of the tensor(s). + The attribute "upper" determines whether the upper or lower part is retained. If set to true, + the upper triangular matrix is retained. Lower triangular matrix is retained otherwise. + Default value for the "upper" attribute is true. + Trilu takes one input tensor of shape [*, N, M], where * is zero or more batch dimensions. The upper triangular part consists + of the elements on and above the given diagonal (k). The lower triangular part consists of elements on and below the diagonal. + All other elements in the matrix are set to zero. + If k = 0, the triangular part on and above/below the main diagonal is retained. + If upper is set to true, a positive k retains the upper triangular matrix excluding the main diagonal and (k-1) diagonals above it. + A negative k value retains the main diagonal and |k| diagonals below it. + If upper is set to false, a positive k retains the lower triangular matrix including the main diagonal and k diagonals above it. + A negative k value excludes the main diagonal and (|k|-1) diagonals below it. + + + Args: + input: (differentiable) Input tensor of rank 2 or higher. + + k: (optional, non-differentiable) A 0-D tensor containing a single value + corresponding to the number diagonals above or below the main diagonal + to exclude or include. Default value is 0 if it's not specified. + + upper: Boolean. Indicates whether upper or lower part of matrix is retained. + Default is true. + """ + + schema = get_schema("Trilu", 14, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Trilu", schema) + return op(*self._prepare_inputs(schema, input, k), upper=upper) diff --git a/onnxscript/onnx_opset/_impl/opset15.py b/onnxscript/onnx_opset/_impl/opset15.py new file mode 100644 index 0000000000..4e918e67ee --- /dev/null +++ b/onnxscript/onnx_opset/_impl/opset15.py @@ -0,0 +1,698 @@ +# -------------------------------------------------------------------------- +# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ +# ⚙️ Generated by 'python -m opgen' +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+# --------------------------------------------------------------------------
+# flake8: noqa
+# mypy: disable-error-code=override
+# pylint: disable=W0221,W0222,W0237,W0246,R0901
+# --------------------------------------------------------------------------
+
+from typing import Callable
+from typing import Optional as _Optional
+from typing import Sequence, Tuple, Union
+
+from onnx import TypeProto
+from onnx.defs import get_schema
+
+from onnxscript.onnx_opset._impl.opset14 import Opset14
+from onnxscript.onnx_types import (
+    BFLOAT16,
+    BOOL,
+    COMPLEX64,
+    COMPLEX128,
+    DOUBLE,
+    FLOAT,
+    FLOAT16,
+    INT8,
+    INT16,
+    INT32,
+    INT64,
+    STRING,
+    UINT8,
+    UINT16,
+    UINT32,
+    UINT64,
+)
+from onnxscript.values import Op, Opset
+
+
+class Opset15(Opset14):
+    def __new__(cls):
+        return Opset.__new__(cls, "", 15)
+
+    def __init__(self):
+        super().__init__()
+
+    def BatchNormalization(
+        self,
+        X: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16],
+        scale: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16],
+        B: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16],
+        input_mean: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16],
+        input_var: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16],
+        epsilon: float = 9.999999747378752e-06,
+        momentum: float = 0.8999999761581421,
+        training_mode: int = 0,
+    ) -> Tuple[
+        Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16],
+        Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16],
+        Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16],
+    ]:
+        r"""[🌐 BatchNormalization(15)](https://onnx.ai/onnx/operators/onnx__BatchNormalization.html#batchnormalization-15 "Online Documentation")
+
+
+        Carries out batch normalization as described in the paper
+        https://arxiv.org/abs/1502.03167. Depending on the mode it is being run,
+        there are five required inputs 'X', 'scale', 'B', 'input_mean' and
+        'input_var'.
+        Note that 'input_mean' and 'input_var' are expected to be the estimated
+        statistics in inference mode (training_mode=False, default),
+        and the running statistics in training mode (training_mode=True).
+        There are multiple cases for the number of outputs, which we list below:
+
+        Output case #1: Y, running_mean, running_var (training_mode=True)
+        Output case #2: Y (training_mode=False)
+
+        When training_mode=False, extra outputs are invalid.
+        The outputs are updated as follows when training_mode=True:
+        ::
+
+            running_mean = input_mean * momentum + current_mean * (1 - momentum)
+            running_var = input_var * momentum + current_var * (1 - momentum)
+
+            Y = (X - current_mean) / sqrt(current_var + epsilon) * scale + B
+
+            where:
+
+            current_mean = ReduceMean(X, axis=all_except_channel_index)
+            current_var = ReduceVar(X, axis=all_except_channel_index)
+
+            Notice that ReduceVar refers to the population variance, and it equals
+            sum(sqrd(x_i - x_avg)) / N
+            where N is the population size (this formula does not use sample size N - 1).
+
+        The computation of ReduceMean and ReduceVar uses float to avoid overflow for float16 inputs.
+
+        When training_mode=False:
+        ::
+
+            Y = (X - input_mean) / sqrt(input_var + epsilon) * scale + B
+
+        For previous (deprecated) non-spatial cases, implementors are suggested
+        to flatten the input shape to (N x C * D1 * D2 * ... * Dn) before a BatchNormalization Op.
+        This operator has **optional** inputs/outputs. See `ONNX `_ for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.
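+
+        A minimal inference-mode sketch (training_mode=0, the default; the
+        `script` decorator and the names below are illustrative assumptions):
+        ::
+
+            from onnxscript import FLOAT, script
+            from onnxscript.onnx_opset import opset15 as op
+
+            @script()
+            def normalize(
+                X: FLOAT[1, 3, 8, 8],
+                scale: FLOAT[3],
+                bias: FLOAT[3],
+                mean: FLOAT[3],
+                var: FLOAT[3],
+            ) -> FLOAT[1, 3, 8, 8]:
+                # in inference mode only the normalized output Y is produced
+                Y = op.BatchNormalization(X, scale, bias, mean, var)
+                return Y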
+
+
+        Args:
+            X: (differentiable) Input data tensor from the previous operator; dimensions
+                are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size,
+                C is the number of channels. Statistics are computed for every channel
+                of C over N and D1 to Dn dimensions. For image data, input dimensions
+                become (N x C x H x W). The op also accepts single dimension input of
+                size N in which case C is assumed to be 1
+
+            scale: (differentiable) Scale tensor of shape (C).
+
+            B: (differentiable) Bias tensor of shape (C).
+
+            input_mean: (differentiable) running (training) or estimated (testing) mean
+                tensor of shape (C).
+
+            input_var: (differentiable) running (training) or estimated (testing)
+                variance tensor of shape (C).
+
+            epsilon: The epsilon value to use to avoid division by zero.
+
+            momentum: Factor used in computing the running mean and variance. E.g.,
+                running_mean = running_mean * momentum + mean * (1 - momentum).
+
+            training_mode: If set to true, it indicates BatchNormalization is being used
+                for training, and outputs 1, 2, 3, and 4 would be populated.
+        """
+
+        schema = get_schema("BatchNormalization", 15, "")
+        op: Callable[
+            ...,
+            Tuple[
+                Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16],
+                Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16],
+                Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16],
+            ],
+        ] = Op(self, "BatchNormalization", schema)
+        return op(
+            *self._prepare_inputs(schema, X, scale, B, input_mean, input_var),
+            epsilon=epsilon,
+            momentum=momentum,
+            training_mode=training_mode,
+        )
+
+    def Bernoulli(
+        self,
+        input: Union[DOUBLE, FLOAT, FLOAT16],
+        dtype: _Optional[int] = None,
+        seed: _Optional[float] = None,
+    ) -> Union[
+        BFLOAT16,
+        BOOL,
+        DOUBLE,
+        FLOAT,
+        FLOAT16,
+        INT16,
+        INT32,
+        INT64,
+        INT8,
+        UINT16,
+        UINT32,
+        UINT64,
+        UINT8,
+    ]:
+        r"""[🌐 Bernoulli(15)](https://onnx.ai/onnx/operators/onnx__Bernoulli.html#bernoulli-15 "Online Documentation")
+
+
+        Draws binary random numbers (0 or 1) from a Bernoulli distribution. The input tensor should be a tensor
+        containing probabilities p (a value in the range [0, 1]) to be used for drawing the binary random number,
+        where an output of 1 is produced with probability p and an output of 0 is produced with probability (1-p).
+
+        This operator is non-deterministic and may not produce the same values in different
+        implementations (even if a seed is specified).
+
+
+        Args:
+            input: All values in input have to be in the range [0, 1].
+
+            dtype: The data type for the elements of the output tensor. If not
+                specified, we will use the data type of the input tensor.
+
+            seed: (Optional) Seed to the random generator; if not specified we will auto
+                generate one.
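+
+        A minimal sketch (the `script` decorator and the names below are
+        illustrative assumptions; 6 is the onnx.TensorProto code for INT32):
+        ::
+
+            from onnxscript import FLOAT, INT32, script
+            from onnxscript.onnx_opset import opset15 as op
+
+            @script()
+            def draw(p: FLOAT[10]) -> INT32[10]:
+                # sample 0/1 elementwise with probability p, emitted as int32
+                return op.Bernoulli(p, dtype=6)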
+ """ + + schema = get_schema("Bernoulli", 15, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Bernoulli", schema) + return op(*self._prepare_inputs(schema, input), dtype=dtype, seed=seed) + + def CastLike( + self, + input: Union[ + BFLOAT16, + BOOL, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + target_type: Union[ + BFLOAT16, + BOOL, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> Union[ + BFLOAT16, + BOOL, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 CastLike(15)](https://onnx.ai/onnx/operators/onnx__CastLike.html#castlike-15 "Online Documentation") + + + The operator casts the elements of a given input tensor (the first input) to + the same data type as the elements of the second input tensor. + See documentation of the Cast operator for further details. + + + Args: + input: (differentiable) Input tensor to be cast. + + target_type: (non-differentiable) The (first) input tensor will be cast to + produce a tensor of the same type as this (second input) tensor. + """ + + schema = get_schema("CastLike", 15, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "CastLike", schema) + return op(*self._prepare_inputs(schema, input, target_type)) + + def Optional( + self, + input: _Optional[ + Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ] + ] = None, + type: _Optional[TypeProto] = None, + ) -> Union[ + _Optional[Sequence[BOOL]], + _Optional[Sequence[COMPLEX128]], + _Optional[Sequence[COMPLEX64]], + _Optional[Sequence[DOUBLE]], + _Optional[Sequence[FLOAT]], + _Optional[Sequence[FLOAT16]], + _Optional[Sequence[INT16]], + _Optional[Sequence[INT32]], + _Optional[Sequence[INT64]], + _Optional[Sequence[INT8]], + _Optional[Sequence[STRING]], + _Optional[Sequence[UINT16]], + _Optional[Sequence[UINT32]], + _Optional[Sequence[UINT64]], + _Optional[Sequence[UINT8]], + _Optional[BOOL], + _Optional[COMPLEX128], + _Optional[COMPLEX64], + _Optional[DOUBLE], + _Optional[FLOAT], + _Optional[FLOAT16], + _Optional[INT16], + _Optional[INT32], + _Optional[INT64], + _Optional[INT8], + _Optional[STRING], + _Optional[UINT16], + _Optional[UINT32], + _Optional[UINT64], + _Optional[UINT8], + ]: + r"""[🌐 Optional(15)](https://onnx.ai/onnx/operators/onnx__Optional.html#optional-15 "Online Documentation") + + + Constructs an optional-type value containing either an empty optional of a certain type specified by the attribute, + or a non-empty value containing the input element. + + + Args: + input: (optional) The input element. 
+ + type: Type of the element in the optional output + """ + + schema = get_schema("Optional", 15, "") + op: Callable[ + ..., + Union[ + _Optional[Sequence[BOOL]], + _Optional[Sequence[COMPLEX128]], + _Optional[Sequence[COMPLEX64]], + _Optional[Sequence[DOUBLE]], + _Optional[Sequence[FLOAT]], + _Optional[Sequence[FLOAT16]], + _Optional[Sequence[INT16]], + _Optional[Sequence[INT32]], + _Optional[Sequence[INT64]], + _Optional[Sequence[INT8]], + _Optional[Sequence[STRING]], + _Optional[Sequence[UINT16]], + _Optional[Sequence[UINT32]], + _Optional[Sequence[UINT64]], + _Optional[Sequence[UINT8]], + _Optional[BOOL], + _Optional[COMPLEX128], + _Optional[COMPLEX64], + _Optional[DOUBLE], + _Optional[FLOAT], + _Optional[FLOAT16], + _Optional[INT16], + _Optional[INT32], + _Optional[INT64], + _Optional[INT8], + _Optional[STRING], + _Optional[UINT16], + _Optional[UINT32], + _Optional[UINT64], + _Optional[UINT8], + ], + ] = Op(self, "Optional", schema) + return op(*self._prepare_inputs(schema, input), type=type) + + def OptionalGetElement( + self, + input: Union[ + _Optional[Sequence[BOOL]], + _Optional[Sequence[COMPLEX128]], + _Optional[Sequence[COMPLEX64]], + _Optional[Sequence[DOUBLE]], + _Optional[Sequence[FLOAT]], + _Optional[Sequence[FLOAT16]], + _Optional[Sequence[INT16]], + _Optional[Sequence[INT32]], + _Optional[Sequence[INT64]], + _Optional[Sequence[INT8]], + _Optional[Sequence[STRING]], + _Optional[Sequence[UINT16]], + _Optional[Sequence[UINT32]], + _Optional[Sequence[UINT64]], + _Optional[Sequence[UINT8]], + _Optional[BOOL], + _Optional[COMPLEX128], + _Optional[COMPLEX64], + _Optional[DOUBLE], + _Optional[FLOAT], + _Optional[FLOAT16], + _Optional[INT16], + _Optional[INT32], + _Optional[INT64], + _Optional[INT8], + _Optional[STRING], + _Optional[UINT16], + _Optional[UINT32], + _Optional[UINT64], + _Optional[UINT8], + ], + ) -> Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 OptionalGetElement(15)](https://onnx.ai/onnx/operators/onnx__OptionalGetElement.html#optionalgetelement-15 "Online Documentation") + + + Outputs the element in the optional-type input. It is an error if the input value does not have an element + and the behavior is undefined in this case. + + + Args: + input: The optional input. 
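+
+        A minimal round-trip sketch with `Optional` (the `script` decorator
+        and the names below are illustrative assumptions):
+        ::
+
+            from onnxscript import FLOAT, script
+            from onnxscript.onnx_opset import opset15 as op
+
+            @script()
+            def roundtrip(X: FLOAT[4]) -> FLOAT[4]:
+                # wrap X in an optional value, then extract the element again
+                opt = op.Optional(X)
+                return op.OptionalGetElement(opt)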
+ """ + + schema = get_schema("OptionalGetElement", 15, "") + op: Callable[ + ..., + Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "OptionalGetElement", schema) + return op(*self._prepare_inputs(schema, input)) + + def OptionalHasElement( + self, + input: Union[ + _Optional[Sequence[BOOL]], + _Optional[Sequence[COMPLEX128]], + _Optional[Sequence[COMPLEX64]], + _Optional[Sequence[DOUBLE]], + _Optional[Sequence[FLOAT]], + _Optional[Sequence[FLOAT16]], + _Optional[Sequence[INT16]], + _Optional[Sequence[INT32]], + _Optional[Sequence[INT64]], + _Optional[Sequence[INT8]], + _Optional[Sequence[STRING]], + _Optional[Sequence[UINT16]], + _Optional[Sequence[UINT32]], + _Optional[Sequence[UINT64]], + _Optional[Sequence[UINT8]], + _Optional[BOOL], + _Optional[COMPLEX128], + _Optional[COMPLEX64], + _Optional[DOUBLE], + _Optional[FLOAT], + _Optional[FLOAT16], + _Optional[INT16], + _Optional[INT32], + _Optional[INT64], + _Optional[INT8], + _Optional[STRING], + _Optional[UINT16], + _Optional[UINT32], + _Optional[UINT64], + _Optional[UINT8], + ], + ) -> BOOL: + r"""[🌐 OptionalHasElement(15)](https://onnx.ai/onnx/operators/onnx__OptionalHasElement.html#optionalhaselement-15 "Online Documentation") + + + Returns true if the optional-type input contains an element. If it is an empty optional-type, this op returns false. + + + Args: + input: The optional input. + """ + + schema = get_schema("OptionalHasElement", 15, "") + op: Callable[..., BOOL] = Op(self, "OptionalHasElement", schema) + return op(*self._prepare_inputs(schema, input)) + + def Pow( + self, + X: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64], + Y: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64]: + r"""[🌐 Pow(15)](https://onnx.ai/onnx/operators/onnx__Pow.html#pow-15 "Online Documentation") + + + Pow takes input data (Tensor) and exponent Tensor, and + produces one output data (Tensor) where the function `f(x) = x^exponent`, + is applied to the data tensor elementwise. + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + Args: + X: (differentiable) First operand, base of the exponent. + + Y: (differentiable) Second operand, power of the exponent. + """ + + schema = get_schema("Pow", 15, "") + op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64]] = Op( + self, "Pow", schema + ) + return op(*self._prepare_inputs(schema, X, Y)) + + def Shape( + self, + data: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + end: _Optional[int] = None, + start: int = 0, + ) -> INT64: + r"""[🌐 Shape(15)](https://onnx.ai/onnx/operators/onnx__Shape.html#shape-15 "Online Documentation") + + + Takes a tensor as input and outputs an 1D int64 tensor containing the shape of the input tensor. 
+        Optional attributes start and end can be used to compute a slice of the input tensor's shape.
+        If start axis is omitted, the slice starts from axis 0.
+        The end axis, if specified, is exclusive (and the returned value will not include the size of that axis).
+        If the end axis is omitted, the axes up to the last one will be included.
+        Negative axes indicate counting back from the last axis.
+        Note that axes will be clamped to the range [0, r-1], where r is the
+        rank of the input tensor if they are out-of-range (after adding r in the case of
+        negative axis). Thus, specifying any end value > r is equivalent to specifying an end
+        value of r, and specifying any start value < -r is equivalent to specifying a start
+        value of 0.
+
+        For example:
+        Input tensor with shape: [2, 3, 4]
+        No attributes specified.
+        Output: [2, 3, 4]
+
+        Input tensor with shape: [2, 3, 4]
+        start: -1
+        Output: [4]
+
+        Input tensor with shape: [2, 3, 4]
+        end: -1
+        Output: [2, 3]
+
+        Input tensor with shape: [2, 3, 4]
+        start: 1
+        end: 2
+        Output: [3]
+
+
+        Args:
+            data: (non-differentiable) An input tensor.
+
+            end: (Optional) Ending axis for slicing the shape. Negative value means
+                counting dimensions from the back. If omitted, sizes of all axes up to
+                (and including) the last one will be included.
+
+            start: (Optional) Starting axis for slicing the shape. Default value is
+                0. Negative value means counting dimensions from the back.
+        """
+
+        schema = get_schema("Shape", 15, "")
+        op: Callable[..., INT64] = Op(self, "Shape", schema)
+        return op(*self._prepare_inputs(schema, data), end=end, start=start)
diff --git a/onnxscript/onnx_opset/_impl/opset16.py b/onnxscript/onnx_opset/_impl/opset16.py
new file mode 100644
index 0000000000..702a16859a
--- /dev/null
+++ b/onnxscript/onnx_opset/_impl/opset16.py
@@ -0,0 +1,1828 @@
+# --------------------------------------------------------------------------
+# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️
+# ⚙️ Generated by 'python -m opgen'
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+# -------------------------------------------------------------------------- +# flake8: noqa +# mypy: disable-error-code=override +# pylint: disable=W0221,W0222,W0237,W0246,R0901 +# -------------------------------------------------------------------------- + +from typing import Callable, Optional, Sequence, Union + +from onnx import GraphProto +from onnx.defs import get_schema + +from onnxscript.onnx_opset._impl.opset15 import Opset15 +from onnxscript.onnx_types import ( + BFLOAT16, + BOOL, + COMPLEX64, + COMPLEX128, + DOUBLE, + FLOAT, + FLOAT16, + INT8, + INT16, + INT32, + INT64, + STRING, + UINT8, + UINT16, + UINT32, + UINT64, +) +from onnxscript.values import Op, Opset + + +class Opset16(Opset15): + def __new__(cls): + return Opset.__new__(cls, "", 16) + + def __init__(self): + super().__init__() + + def GreaterOrEqual( + self, + A: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + B: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> BOOL: + r"""[🌐 GreaterOrEqual(16)](https://onnx.ai/onnx/operators/onnx__GreaterOrEqual.html#greaterorequal-16 "Online Documentation") + + + Returns the tensor resulted from performing the `greater_equal` logical operation + elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: (non-differentiable) First input operand for the logical operator. + + B: (non-differentiable) Second input operand for the logical operator. + """ + + schema = get_schema("GreaterOrEqual", 16, "") + op: Callable[..., BOOL] = Op(self, "GreaterOrEqual", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def GridSample( + self, + X: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + grid: Union[DOUBLE, FLOAT, FLOAT16], + align_corners: int = 0, + mode: str = "bilinear", + padding_mode: str = "zeros", + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 GridSample(16)](https://onnx.ai/onnx/operators/onnx__GridSample.html#gridsample-16 "Online Documentation") + + + Given an input `X` and a flow-field `grid`, computes the output `Y` using `X` values and pixel locations from `grid`. + Currently, only spatial (4-D) inputs are supported. For input `X` with shape (N, C, H, W) and `grid` with shape (N, H_out, W_out, 2), + the output `Y` will have shape (N, C, H_out, W_out). + + The tensor `X` contains values at centers of square pixels in a H by W 2-dimensional image. + The tensor `grid` describes normalized positions where the output `Y` is to be computed + using a specified interpolation method (the mode) and a padding mode (for grid positions falling outside the 2-dimensional image). + + Elements in `grid[N, H_out, W_out]` are size-2 vectors specifying positions in the 2-dimensional space of `X`. + They are used to interpolate output values of `Y[N, C, H_out, W_out]`. + + The GridSample operator is often used in doing grid generator and sampler in the [Spatial Transformer Networks](https://arxiv.org/abs/1506.02025). 
+        See also in [torch.nn.functional.grid_sample](https://pytorch.org/docs/master/generated/torch.nn.functional.grid_sample.html#torch-nn-functional-grid-sample).
+
+
+        Args:
+            X: (differentiable) 4-D tensor of shape (N, C, H, W), where N is the batch
+                size, C is the number of channels, H and W are the height and width of
+                the input data.
+
+            grid: (non-differentiable) Input offset, 4-D tensor of shape (N, H_out,
+                W_out, 2), where H_out and W_out are the height and width of grid and
+                output. Grid specifies the sampling pixel locations normalized by the
+                input spatial dimensions. Therefore, it should have most values in the
+                range of [-1, 1]. If grid has values outside the range of [-1, 1], the
+                corresponding outputs will be handled as defined by padding_mode.
+
+            align_corners: If align_corners=1, the extrema (-1 and 1) are considered as
+                referring to the center points of the input's corner pixels. If
+                align_corners=0, they are instead considered as referring to the corner
+                points of the input's corner pixels, making the sampling more resolution
+                agnostic.
+
+            mode: Three interpolation modes: bilinear (default), nearest and bicubic.
+
+            padding_mode: Support padding modes for outside grid values:
+                `zeros` (default), `border`, `reflection`. zeros: use 0 for out-of-bound
+                grid locations, border: use border values for out-of-bound grid
+                locations, reflection: use values at locations reflected by the border
+                for out-of-bound grid locations. If index 0 represents the margin pixel,
+                the reflected value at index -1 will be the same as the value at index
+                1. For locations far away from the border, it will keep being reflected
+                until becoming in bound. If pixel location x = -3.5 reflects by border
+                -1 and becomes x' = 1.5, then reflects by border 1 and becomes x'' =
+                0.5.
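+
+        A minimal sketch (the `script` decorator and the names below are
+        illustrative assumptions):
+        ::
+
+            from onnxscript import FLOAT, script
+            from onnxscript.onnx_opset import opset16 as op
+
+            @script()
+            def resample(X: FLOAT[1, 1, 4, 4], grid: FLOAT[1, 6, 6, 2]) -> FLOAT[1, 1, 6, 6]:
+                # bilinear sampling of a 4x4 image at 6x6 normalized grid locations
+                return op.GridSample(X, grid, mode="bilinear", padding_mode="border")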
+ """ + + schema = get_schema("GridSample", 16, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "GridSample", schema) + return op( + *self._prepare_inputs(schema, X, grid), + align_corners=align_corners, + mode=mode, + padding_mode=padding_mode, + ) + + def Identity( + self, + input: Union[ + Optional[Sequence[BOOL]], + Optional[Sequence[COMPLEX128]], + Optional[Sequence[COMPLEX64]], + Optional[Sequence[DOUBLE]], + Optional[Sequence[FLOAT]], + Optional[Sequence[FLOAT16]], + Optional[Sequence[INT16]], + Optional[Sequence[INT32]], + Optional[Sequence[INT64]], + Optional[Sequence[INT8]], + Optional[Sequence[STRING]], + Optional[Sequence[UINT16]], + Optional[Sequence[UINT32]], + Optional[Sequence[UINT64]], + Optional[Sequence[UINT8]], + Optional[BOOL], + Optional[COMPLEX128], + Optional[COMPLEX64], + Optional[DOUBLE], + Optional[FLOAT], + Optional[FLOAT16], + Optional[INT16], + Optional[INT32], + Optional[INT64], + Optional[INT8], + Optional[STRING], + Optional[UINT16], + Optional[UINT32], + Optional[UINT64], + Optional[UINT8], + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> Union[ + Optional[Sequence[BOOL]], + Optional[Sequence[COMPLEX128]], + Optional[Sequence[COMPLEX64]], + Optional[Sequence[DOUBLE]], + Optional[Sequence[FLOAT]], + Optional[Sequence[FLOAT16]], + Optional[Sequence[INT16]], + Optional[Sequence[INT32]], + Optional[Sequence[INT64]], + Optional[Sequence[INT8]], + Optional[Sequence[STRING]], + Optional[Sequence[UINT16]], + Optional[Sequence[UINT32]], + Optional[Sequence[UINT64]], + Optional[Sequence[UINT8]], + Optional[BOOL], + Optional[COMPLEX128], + Optional[COMPLEX64], + Optional[DOUBLE], + Optional[FLOAT], + Optional[FLOAT16], + Optional[INT16], + Optional[INT32], + Optional[INT64], + Optional[INT8], + Optional[STRING], + Optional[UINT16], + Optional[UINT32], + Optional[UINT64], + Optional[UINT8], + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Identity(16)](https://onnx.ai/onnx/operators/onnx__Identity.html#identity-16 "Online Documentation") + + Identity operator + + Args: + input: (differentiable) Input tensor + """ + + schema = get_schema("Identity", 16, "") + op: Callable[ + ..., + Union[ + Optional[Sequence[BOOL]], + Optional[Sequence[COMPLEX128]], + Optional[Sequence[COMPLEX64]], + Optional[Sequence[DOUBLE]], + Optional[Sequence[FLOAT]], + Optional[Sequence[FLOAT16]], + Optional[Sequence[INT16]], + Optional[Sequence[INT32]], + Optional[Sequence[INT64]], + Optional[Sequence[INT8]], + Optional[Sequence[STRING]], + Optional[Sequence[UINT16]], + Optional[Sequence[UINT32]], + 
Optional[Sequence[UINT64]], + Optional[Sequence[UINT8]], + Optional[BOOL], + Optional[COMPLEX128], + Optional[COMPLEX64], + Optional[DOUBLE], + Optional[FLOAT], + Optional[FLOAT16], + Optional[INT16], + Optional[INT32], + Optional[INT64], + Optional[INT8], + Optional[STRING], + Optional[UINT16], + Optional[UINT32], + Optional[UINT64], + Optional[UINT8], + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Identity", schema) + return op(*self._prepare_inputs(schema, input)) + + def If( + self, + cond: BOOL, + else_branch: Optional[GraphProto] = None, + then_branch: Optional[GraphProto] = None, + ) -> Union[ + Optional[Sequence[BFLOAT16]], + Optional[Sequence[BOOL]], + Optional[Sequence[COMPLEX128]], + Optional[Sequence[COMPLEX64]], + Optional[Sequence[DOUBLE]], + Optional[Sequence[FLOAT]], + Optional[Sequence[FLOAT16]], + Optional[Sequence[INT16]], + Optional[Sequence[INT32]], + Optional[Sequence[INT64]], + Optional[Sequence[INT8]], + Optional[Sequence[STRING]], + Optional[Sequence[UINT16]], + Optional[Sequence[UINT32]], + Optional[Sequence[UINT64]], + Optional[Sequence[UINT8]], + Optional[BFLOAT16], + Optional[BOOL], + Optional[COMPLEX128], + Optional[COMPLEX64], + Optional[DOUBLE], + Optional[FLOAT], + Optional[FLOAT16], + Optional[INT16], + Optional[INT32], + Optional[INT64], + Optional[INT8], + Optional[STRING], + Optional[UINT16], + Optional[UINT32], + Optional[UINT64], + Optional[UINT8], + Sequence[BFLOAT16], + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 If(16)](https://onnx.ai/onnx/operators/onnx__If.html#if-16 "Online Documentation") + + If conditional + + Args: + cond: Condition for the if + + else_branch: Graph to run if condition is false. Has N outputs: values you + wish to be live-out to the enclosing scope. The number of outputs must + match the number of outputs in the then_branch. + + then_branch: Graph to run if condition is true. Has N outputs: values you + wish to be live-out to the enclosing scope. The number of outputs must + match the number of outputs in the else_branch. 
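+
+        In onnxscript, a Python `if` statement inside a `script` function is
+        expected to lower to this operator, with each branch captured as a
+        subgraph (a sketch under that assumption; the names are illustrative):
+        ::
+
+            from onnxscript import BOOL, FLOAT, script
+            from onnxscript.onnx_opset import opset16 as op
+
+            @script()
+            def pick(cond: BOOL, X: FLOAT["N"]) -> FLOAT["N"]:
+                # both branches must yield the same number of outputs
+                if cond:
+                    result = op.Relu(X)
+                else:
+                    result = op.Neg(X)
+                return result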
+ """ + + schema = get_schema("If", 16, "") + op: Callable[ + ..., + Union[ + Optional[Sequence[BFLOAT16]], + Optional[Sequence[BOOL]], + Optional[Sequence[COMPLEX128]], + Optional[Sequence[COMPLEX64]], + Optional[Sequence[DOUBLE]], + Optional[Sequence[FLOAT]], + Optional[Sequence[FLOAT16]], + Optional[Sequence[INT16]], + Optional[Sequence[INT32]], + Optional[Sequence[INT64]], + Optional[Sequence[INT8]], + Optional[Sequence[STRING]], + Optional[Sequence[UINT16]], + Optional[Sequence[UINT32]], + Optional[Sequence[UINT64]], + Optional[Sequence[UINT8]], + Optional[BFLOAT16], + Optional[BOOL], + Optional[COMPLEX128], + Optional[COMPLEX64], + Optional[DOUBLE], + Optional[FLOAT], + Optional[FLOAT16], + Optional[INT16], + Optional[INT32], + Optional[INT64], + Optional[INT8], + Optional[STRING], + Optional[UINT16], + Optional[UINT32], + Optional[UINT64], + Optional[UINT8], + Sequence[BFLOAT16], + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "If", schema) + return op( + *self._prepare_inputs(schema, cond), + else_branch=else_branch, + then_branch=then_branch, + ) + + def LeakyRelu( + self, X: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], alpha: float = 0.009999999776482582 + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 LeakyRelu(16)](https://onnx.ai/onnx/operators/onnx__LeakyRelu.html#leakyrelu-16 "Online Documentation") + + + LeakyRelu takes input data (Tensor) and an argument alpha, and produces one + output data (Tensor) where the function `f(x) = alpha * x for x < 0`, + `f(x) = x for x >= 0`, is applied to the data tensor elementwise. + + **History** + - Version 16 adds bfloat16 to the types allowed. + + + Args: + X: (differentiable) Input tensor + + alpha: Coefficient of leakage. + """ + + schema = get_schema("LeakyRelu", 16, "") + op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = Op( + self, "LeakyRelu", schema + ) + return op(*self._prepare_inputs(schema, X), alpha=alpha) + + def LessOrEqual( + self, + A: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + B: Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> BOOL: + r"""[🌐 LessOrEqual(16)](https://onnx.ai/onnx/operators/onnx__LessOrEqual.html#lessorequal-16 "Online Documentation") + + + Returns the tensor resulted from performing the `less_equal` logical operation + elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: (non-differentiable) First input operand for the logical operator. + + B: (non-differentiable) Second input operand for the logical operator. 
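+
+        A minimal sketch (the `script` decorator and the names below are
+        illustrative assumptions):
+        ::
+
+            from onnxscript import BOOL, FLOAT, script
+            from onnxscript.onnx_opset import opset16 as op
+
+            @script()
+            def mask(A: FLOAT[3, 4], B: FLOAT[4]) -> BOOL[3, 4]:
+                # elementwise A <= B with Numpy-style broadcasting
+                return op.LessOrEqual(A, B)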
+ """ + + schema = get_schema("LessOrEqual", 16, "") + op: Callable[..., BOOL] = Op(self, "LessOrEqual", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def Loop( + self, + M: Optional[INT64], + cond: Optional[BOOL], + *v_initial: Union[ + Optional[Sequence[BFLOAT16]], + Optional[Sequence[BOOL]], + Optional[Sequence[COMPLEX128]], + Optional[Sequence[COMPLEX64]], + Optional[Sequence[DOUBLE]], + Optional[Sequence[FLOAT]], + Optional[Sequence[FLOAT16]], + Optional[Sequence[INT16]], + Optional[Sequence[INT32]], + Optional[Sequence[INT64]], + Optional[Sequence[INT8]], + Optional[Sequence[STRING]], + Optional[Sequence[UINT16]], + Optional[Sequence[UINT32]], + Optional[Sequence[UINT64]], + Optional[Sequence[UINT8]], + Optional[BFLOAT16], + Optional[BOOL], + Optional[COMPLEX128], + Optional[COMPLEX64], + Optional[DOUBLE], + Optional[FLOAT], + Optional[FLOAT16], + Optional[INT16], + Optional[INT32], + Optional[INT64], + Optional[INT8], + Optional[STRING], + Optional[UINT16], + Optional[UINT32], + Optional[UINT64], + Optional[UINT8], + Sequence[BFLOAT16], + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + body: Optional[GraphProto] = None, + ) -> Union[ + Optional[Sequence[BFLOAT16]], + Optional[Sequence[BOOL]], + Optional[Sequence[COMPLEX128]], + Optional[Sequence[COMPLEX64]], + Optional[Sequence[DOUBLE]], + Optional[Sequence[FLOAT]], + Optional[Sequence[FLOAT16]], + Optional[Sequence[INT16]], + Optional[Sequence[INT32]], + Optional[Sequence[INT64]], + Optional[Sequence[INT8]], + Optional[Sequence[STRING]], + Optional[Sequence[UINT16]], + Optional[Sequence[UINT32]], + Optional[Sequence[UINT64]], + Optional[Sequence[UINT8]], + Optional[BFLOAT16], + Optional[BOOL], + Optional[COMPLEX128], + Optional[COMPLEX64], + Optional[DOUBLE], + Optional[FLOAT], + Optional[FLOAT16], + Optional[INT16], + Optional[INT32], + Optional[INT64], + Optional[INT8], + Optional[STRING], + Optional[UINT16], + Optional[UINT32], + Optional[UINT64], + Optional[UINT8], + Sequence[BFLOAT16], + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Loop(16)](https://onnx.ai/onnx/operators/onnx__Loop.html#loop-16 "Online Documentation") + + + Generic Looping construct. This loop has multiple termination conditions: + + 1) Trip count. Iteration count specified at runtime. Set by + specifying the input M. Optional. Set to empty string to omit. + Note that a static trip count (specified at graph construction time) can be + specified by passing in a constant node for input M. + 2) Loop termination condition. This is an input to the op that determines + whether to run the first iteration and also a loop-carried dependency for + the body graph. 
The body graph must yield a value for the condition variable, + whether this input is provided or not. + + This table summarizes the operating modes of this operator with equivalent + C-style code: + + Operator inputs defined as (max_trip_count, condition_var). + + input ("", ""): + for (int i=0; ; ++i) { + cond = ... // Note this value is ignored, but is required in the body + } + + input ("", cond) // Note this is analogous to a while loop + bool cond = ...; + for (int i=0; cond; ++i) { + cond = ...; + } + + input ("", 1) // Note this is analogous to a do-while loop + bool cond = true + for (int i=0; cond; ++i) { + cond = ...; + } + + input (trip_count, "") // Note this is analogous to a for loop + int trip_count = ... + for (int i=0; i < trip_count; ++i) { + cond = ...; // ignored + } + + input (trip_count, cond) + int trip_count = ...; + bool cond = ...; + for (int i=0; i < trip_count && cond; ++i) { + cond = ...; + } + + + *Sample usage - cond as well as trip count* + + graph predict-net { + %a = Constant[value = ]() + %b = Constant[value = ]() + %keepgoing = Constant[value = ]() + %max_trip_count = Constant[value = ]() + %keepgoing_out, %b_out, %user_defined_vals = Loop[body = ](%max_trip_count, %keepgoing, %b) + return + } + + graph body-net ( + %i[INT32, scalar] // iteration number + %keepgoing_in[BOOL, scalar] // incoming loop-termination-condition; not used + %b_in[INT32, scalar] // incoming value of loop-carried-dependency b + ) { + %my_local = Add(%a, %b_in) + %b_out = Sub(%a, %b_in) // outgoing value of loop-carried-dependency b + %keepgoing_out = Greater(%my_local, %b_out) // outgoing loop-termination-condition + %user_defined_val = Add(%b_in, %b_in) // scan-output value to be accumulated + return %keepgoing_out, %b_out, %user_defined_val + } + + *Sample equivalent C code* + + { + /* User-defined code (enclosing scope) */ + int a = 3, b = 6; + bool keepgoing = true; // Analogous to input cond + /* End user-defined code */ + + /* Implicitly-defined code */ + const int max_trip_count = 10; // Analogous to input M + int user_defined_vals[]; // Imagine this is resizable + /* End implicitly-defined code */ + /* initialize loop-carried variables and scan-output variables */ + bool keepgoing_out = keepgoing + int b_out = b + + for (int i=0; i < max_trip_count && keepgoing_out; ++i) { + /* Implicitly-defined code: bind actual parameter values + to formal parameter variables of loop-body */ + bool keepgoing_in = keepgoing_out; + bool b_in = b_out; + + /* User-defined code (loop body) */ + int my_local = a + b_in; // Reading value "a" from the enclosing scope is fine + b_out = a - b_in; + keepgoing_out = my_local > b_out; + user_defined_val = b_in + b_in; // b_in and b_out are different variables + /* End user-defined code */ + + /* Implicitly defined-code */ + user_defined_vals[i] = user_defined_val // accumulate scan-output values + } + // int t = my_local; // Can't do this. my_local is not accessible here. + + // The values below are bound to the output variables of the loop and therefore accessible + // b_out; user_defined_vals; keepgoing_out; + } + + There are several things of note in this code snippet: + + 1) Values from the enclosing scope (i.e. variable "a" here) are in scope and can + be referenced in the inputs of the loop. + 2) Any values computed in the loop body that needs to be used in a subsequent + iteration or after the loop are modelled using a pair of variables in the loop-body, + consisting of an input variable (eg., b_in) and an output variable (eg., b_out). 
+        These are referred to as loop-carried dependencies. The loop operation node
+        supplies the input value of the input variable for the first iteration, and
+        returns the output value of the output variable produced by the final
+        iteration.
+        3) Scan_output variables are used to implicitly concatenate values computed across
+        all the iterations. In the above example, the values of user_defined_val computed
+        over all iterations are concatenated and returned as the value of user_defined_vals
+        after the loop.
+        4) Values created in the body cannot be accessed in the enclosing scope,
+        except using the mechanism described above.
+
+        Note that the semantics of this op support "diagonal" or "wavefront" execution.
+        (See Step 3 here for an example:
+        https://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/).
+        Frontends should emit multi-layer RNNs as a series of While operators (with
+        time being the inner looping dimension), with each successive layer consuming
+        the scan_outputs from the previous layer, possibly going through several
+        point-wise operators (e.g. dropout, residual connections, linear layer).
+
+        Input/output matching for the subgraph (produced by the loop node) is based on
+        order instead of name; the implementation will figure out the names based on
+        this order.
+
+
+        Args:
+            M: (optional) A maximum trip-count for the loop specified at runtime.
+                Optional. Pass empty string to skip.
+
+            cond: (optional) A boolean termination condition. Optional. Pass empty
+                string to skip.
+
+            v_initial: (variadic, heterogeneous) The initial values of any loop-carried
+                dependencies (values that change across loop iterations)
+
+            body: The graph run each iteration. It has 2+N inputs: (iteration_num,
+                condition, loop carried dependencies...). It has 1+N+K outputs:
+                (condition, loop carried dependencies..., scan_outputs...). Each
+                scan_output is created by concatenating the value of the specified
+                output value at the end of each iteration of the loop. It is an error if
+                the dimensions or data type of these scan_outputs change across loop
+                iterations.
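+
+        In onnxscript, a Python loop inside a `script` function is expected to
+        lower to this operator, with the loop body captured as the `body`
+        subgraph (a sketch under that assumption; the names are illustrative):
+        ::
+
+            from onnxscript import FLOAT, INT64, script
+            from onnxscript.onnx_opset import opset16 as op
+
+            @script()
+            def repeat_double(X: FLOAT["N"], M: INT64) -> FLOAT["N"]:
+                # X is a loop-carried dependency, updated on every iteration
+                for _ in range(M):
+                    X = op.Add(X, X)
+                return X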
+ """ + + schema = get_schema("Loop", 16, "") + op: Callable[ + ..., + Union[ + Optional[Sequence[BFLOAT16]], + Optional[Sequence[BOOL]], + Optional[Sequence[COMPLEX128]], + Optional[Sequence[COMPLEX64]], + Optional[Sequence[DOUBLE]], + Optional[Sequence[FLOAT]], + Optional[Sequence[FLOAT16]], + Optional[Sequence[INT16]], + Optional[Sequence[INT32]], + Optional[Sequence[INT64]], + Optional[Sequence[INT8]], + Optional[Sequence[STRING]], + Optional[Sequence[UINT16]], + Optional[Sequence[UINT32]], + Optional[Sequence[UINT64]], + Optional[Sequence[UINT8]], + Optional[BFLOAT16], + Optional[BOOL], + Optional[COMPLEX128], + Optional[COMPLEX64], + Optional[DOUBLE], + Optional[FLOAT], + Optional[FLOAT16], + Optional[INT16], + Optional[INT32], + Optional[INT64], + Optional[INT8], + Optional[STRING], + Optional[UINT16], + Optional[UINT32], + Optional[UINT64], + Optional[UINT8], + Sequence[BFLOAT16], + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Loop", schema) + return op(*self._prepare_inputs(schema, M, cond, *v_initial), body=body) + + def PRelu( + self, + X: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + slope: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 PRelu(16)](https://onnx.ai/onnx/operators/onnx__PRelu.html#prelu-16 "Online Documentation") + + + PRelu takes input data (Tensor) and slope tensor as input, and produces one + output data (Tensor) where the function `f(x) = slope * x for x < 0`, + `f(x) = x for x >= 0`., is applied to the data tensor elementwise. + + **History** + - Version 16 adds bfloat16 to the types allowed. + This operator supports **unidirectional broadcasting** (tensor slope should be unidirectional broadcastable to input tensor X); for more details please check `Broadcasting in ONNX `_. + + Args: + X: (differentiable) Input tensor + + slope: (differentiable) Slope tensor. The shape of slope can be smaller then + first input X; if so, its shape must be unidirectional broadcastable to + X + """ + + schema = get_schema("PRelu", 16, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "PRelu", schema) + return op(*self._prepare_inputs(schema, X, slope)) + + def RoiAlign( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + rois: Union[DOUBLE, FLOAT, FLOAT16], + batch_indices: INT64, + coordinate_transformation_mode: str = "half_pixel", + mode: str = "avg", + output_height: int = 1, + output_width: int = 1, + sampling_ratio: int = 0, + spatial_scale: float = 1.0, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 RoiAlign(16)](https://onnx.ai/onnx/operators/onnx__RoiAlign.html#roialign-16 "Online Documentation") + + + Region of Interest (RoI) align operation described in the + [Mask R-CNN paper](https://arxiv.org/abs/1703.06870). + RoiAlign consumes an input tensor X and region of interests (rois) + to apply pooling across each RoI; it produces a 4-D tensor of shape + (num_rois, C, output_height, output_width). 
+ + RoiAlign is proposed to avoid the misalignment by removing + quantizations while converting from original image into feature + map and from feature map into RoI feature; in each ROI bin, + the value of the sampled locations are computed directly + through bilinear interpolation. + + + Args: + X: Input data tensor from the previous operator; 4-D feature map of shape + (N, C, H, W), where N is the batch size, C is the number of channels, + and H and W are the height and the width of the data. + + rois: RoIs (Regions of Interest) to pool over; rois is 2-D input of shape + (num_rois, 4) given as [[x1, y1, x2, y2], ...]. The RoIs' coordinates + are in the coordinate system of the input image. Each coordinate set has + a 1:1 correspondence with the 'batch_indices' input. + + batch_indices: 1-D tensor of shape (num_rois,) with each element denoting + the index of the corresponding image in the batch. + + coordinate_transformation_mode: Allowed values are 'half_pixel' and + 'output_half_pixel'. Use the value 'half_pixel' to pixel shift the input + coordinates by -0.5 (the recommended behavior). Use the value + 'output_half_pixel' to omit the pixel shift for the input (use this for + a backward-compatible behavior). + + mode: The pooling method. Two modes are supported: 'avg' and 'max'. Default + is 'avg'. + + output_height: default 1; Pooled output Y's height. + + output_width: default 1; Pooled output Y's width. + + sampling_ratio: Number of sampling points in the interpolation grid used to + compute the output value of each pooled output bin. If > 0, then exactly + sampling_ratio x sampling_ratio grid points are used. If == 0, then an + adaptive number of grid points are used (computed as ceil(roi_width / + output_width), and likewise for height). Default is 0. + + spatial_scale: Multiplicative spatial scale factor to translate ROI + coordinates from their input spatial scale to the scale used when + pooling, i.e., spatial scale of the input feature map X relative to the + input image. E.g.; default is 1.0f. + """ + + schema = get_schema("RoiAlign", 16, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "RoiAlign", schema) + return op( + *self._prepare_inputs(schema, X, rois, batch_indices), + coordinate_transformation_mode=coordinate_transformation_mode, + mode=mode, + output_height=output_height, + output_width=output_width, + sampling_ratio=sampling_ratio, + spatial_scale=spatial_scale, + ) + + def Scan( + self, + *initial_state_and_scan_inputs: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + body: Optional[GraphProto] = None, + num_scan_inputs: Optional[int] = None, + scan_input_axes: Optional[Sequence[int]] = None, + scan_input_directions: Optional[Sequence[int]] = None, + scan_output_axes: Optional[Sequence[int]] = None, + scan_output_directions: Optional[Sequence[int]] = None, + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Scan(16)](https://onnx.ai/onnx/operators/onnx__Scan.html#scan-16 "Online Documentation") + + + Scan can be used to iterate over one or more scan_input tensors, + constructing zero or more scan_output tensors. 
It combines ideas from general recurrences, + functional programming constructs such as scan, fold, map, and zip, and is intended to enable + generalizations of RNN-like constructs for sequence-to-sequence processing. + Other tensors (referred to as state_variables here) can be used to carry a state + when iterating from one element to another (similar to hidden-state in RNNs, also referred + to as loop-carried dependences in the context of loops). + Many common usages involve a single scan_input tensor (where functionality + similar to scan, fold and map can be obtained). When more than one scan_input is used, + a behavior similar to zip is obtained. + + The attribute body must be a graph, specifying the computation to be performed in + every iteration. It takes as input the current values of the state_variables and + the current iterated element of the scan_inputs. It must return the (updated) values + of the state_variables and zero or more scan_output_element tensors. The values of the + scan_output_element tensors are concatenated over all the iterations to produce the + scan_output values of the scan construct (similar to the concatenated intermediate + hidden-state values of RNN-like constructs). All the output tensors (state_variables as + well as scan_output_element tensors) are required to have the same shape in each iteration + of the loop (a restriction imposed to enable efficient memory allocation). + + Note that the iterated element passed to the body subgraph does not have a sequence + axis. It will have a rank one less than the rank of the corresponding scan_input. + + The scan operation returns the final values of the state_variables as well as the + scan_outputs. + + The optional attribute scan_input_directions specifies the direction (forward or backward) + for each scan input. If this attribute is omitted, all sequences are scanned in the forward + direction. A bidirectional scan may be performed by specifying the same tensor input twice + in the scan_inputs, once with a forward direction, and once with a backward direction. + + The scan_output of the operation is produced by concatenating the scan_output_element + values produced by the body in each iteration. The optional attribute scan_output_directions + specifies the direction in which scan_output is constructed (by appending or prepending the + scan_output_element to scan_output in each iteration) for each scan_output. If this attribute + is omitted, the scan_output_element is appended to the scan_output in each iteration. + + The optional attribute scan_input_axes specifies the axis to be scanned for each scan_input. + If omitted, every scan_input will be scanned in axis 0. For example, if axis 0 is the + batch axis and axis 1 is the time axis (to be scanned), specify an axis value of 1. + Note that scanning a non-zero axis may be less efficient than scanning axis zero. + + The optional attribute scan_output_axes specifies the axis along which the scan_outputs + are accumulated for each scan_output. For example, if axis 1 is the time axis (to be + scanned) for both inputs and outputs, specify a scan_input axis and scan_output axis + value of 1. + + Note that because of the ONNX restriction that only the last parameter of an operator can + be variadic, the initial-states and scan-inputs are listed together as one input parameter. + Similarly, the final-states and scan-outputs are listed together as one output parameter. + The attribute num_scan_inputs indicates the number M of scan-inputs. 
+ + The behavior of + + Scan < + num_scan_inputs = m, + body = loop-body, + scan_input_axes = [axis_1, ..., axis_m] + > (init_1, ..., init_n, scan_1, ..., scan_m) + + is equivalent to the following pseudo-code: + + // scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i + // scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j. + sequence_length = scan_1.shape[axis_1]; + + // initialize state-variables + st_1 = init_1; ... st_n = init_n; + // initialize scan-output variables: [] denotes an empty tensor + scan_out_1 = []; ...; scan_out_k = []; + // identify number of iterations: + + // execute loop + for (int t = 0; t < sequence_length; ++t) { + // generate the scan-input elements: the notation T[t] indicates the sub-tensor + // of rank one less than T obtained by indexing T at position t along axis k. + si_1 = scan_1[t]; + ... ; + si_m = scan_m[t]; + // execute loop-body + st_1, ..., st_n, so_1, ..., so_k = loop-body(st_1, ..., st_n, si_1, ..., si_m) + // accumulate the scan-output elements + scan_out_1 = Concat(scan_out_1, so_1); ... ; scan_out_k = Concat(scan_out_k, so_k); + } + + return st_1, ..., st_n, scan_out_1, ..., scan_out_k; + + *Sample usage: Encoding RNN using a Scan* + + The following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi, + recurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can + be encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes + %Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these + values are computed in the outer graph, they need to be passed in as extra state_variables. + + graph rnn-encoding { + %H_0 = ... + %X = ... + %Y_h, %Y = Scan[body = , num_scan_inputs=1](%H_0, %X) + return %Y, %Y_h + } + + graph rnn-cell-1 ( + %H_tminus1[FLOAT, tensor] + %X_t[FLOAT, tensor] + ) { + %Wi = ... + %Ri = ... + %Wbi = ... + %Rbi = ... + %t1 = X_t * (Wi^T) + %t2 = H_tminus1*(Ri^T) + %t3 = Add(%t1, %t2) + %t4 = Add(%t3, %Wbi) + %t5 = Add(%t4, %Rbi) + %Ht = Tanh(%t5) + %Accumulate = Identity(%Ht) + return %Ht, %Accumulate + } + + + + Args: + initial_state_and_scan_inputs: (variadic, heterogeneous) Initial values of + the loop's N state variables followed by M scan_inputs + + body: The graph run each iteration. It has N+M inputs: (loop state + variables..., scan_input_elts...). It has N+K outputs: (loop state + variables..., scan_output_elts...). Each scan_output is created by + concatenating the value of the specified scan_output_elt value at the + end of each iteration of the loop. It is an error if the dimensions of + these values change across loop iterations. + + num_scan_inputs: An attribute specifying the number of scan_inputs M. + + scan_input_axes: An optional list of M flags. The i-th element of the list + specifies the axis to be scanned (the sequence axis) for the i-th + scan_input. If omitted, 0 will be used as the scan axis for every + scan_input. Negative value for an axis means counting dimensions from + the back. Accepted range is [-r, r-1] where r = rank(input). + + scan_input_directions: An optional list of M flags. The i-th element of the + list specifies the direction to be scanned for the i-th scan_input + tensor: 0 indicates forward direction and 1 indicates reverse direction. + If omitted, all scan_input tensors will be scanned in the forward + direction. + + scan_output_axes: An optional list of K flags. 
The i-th element of the list + specifies the axis for the i-th scan_output. The scan outputs are + accumulated along the specified axis. If omitted, 0 will be used as the + scan axis for every scan_output. Negative value for an axis means + counting dimensions from the back. Accepted range is [-r, r-1]. + + scan_output_directions: An optional list of K flags, one for each + scan_output. The i-th element of the list specifies whether the i-th + scan_output should be constructed by appending or prepending a new value + in each iteration: 0 indicates appending and 1 indicates prepending. If + omitted, all scan_output tensors will be produced by appending a value + in each iteration. + """ + + schema = get_schema("Scan", 16, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Scan", schema) + return op( + *self._prepare_inputs(schema, *initial_state_and_scan_inputs), + body=body, + num_scan_inputs=num_scan_inputs, + scan_input_axes=scan_input_axes, + scan_input_directions=scan_input_directions, + scan_output_axes=scan_output_axes, + scan_output_directions=scan_output_directions, + ) + + def ScatterElements( + self, + data: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + indices: Union[INT32, INT64], + updates: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axis: int = 0, + reduction: str = "none", + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 ScatterElements(16)](https://onnx.ai/onnx/operators/onnx__ScatterElements.html#scatterelements-16 "Online Documentation") + + + ScatterElements takes three inputs `data`, `updates`, and `indices` of the same + rank r >= 1 and an optional attribute axis that identifies an axis of `data` + (by default, the outer-most axis, that is axis 0). The output of the operation + is produced by creating a copy of the input `data`, and then updating its value + to values specified by `updates` at specific index positions specified by + `indices`. Its output shape is the same as the shape of `data`. + For each entry in `updates`, the target index in `data` is obtained by combining + the corresponding entry in `indices` with the index of the entry itself: the + index-value for dimension = axis is obtained from the value of the corresponding + entry in `indices` and the index-value for dimension != axis is obtained from the + index of the entry itself. + `reduction` allows specification of an optional reduction operation, which is applied to all values in `updates` + tensor into `output` at the specified `indices`. + In cases where `reduction` is set to "none", indices should not have duplicate entries: that is, if idx1 != idx2, + then indices[idx1] != indices[idx2]. 
For instance, in a 2-D tensor case, the update + corresponding to the [i][j] entry is performed as below: + :: + + output[indices[i][j]][j] = updates[i][j] if axis = 0, + output[i][indices[i][j]] = updates[i][j] if axis = 1, + + + When `reduction` is set to "add", the update corresponding to the [i][j] entry is performed as below: + :: + + output[indices[i][j]][j] += updates[i][j] if axis = 0, + output[i][indices[i][j]] += updates[i][j] if axis = 1, + + + When `reduction` is set to "mul", the update corresponding to the [i][j] entry is performed as below: + :: + + output[indices[i][j]][j] *= updates[i][j] if axis = 0, + output[i][indices[i][j]] *= updates[i][j] if axis = 1, + + + This operator is the inverse of GatherElements. It is similar to Torch's Scatter operation. + Example 1: + :: + + data = [ + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + ] + indices = [ + [1, 0, 2], + [0, 2, 1], + ] + updates = [ + [1.0, 1.1, 1.2], + [2.0, 2.1, 2.2], + ] + output = [ + [2.0, 1.1, 0.0] + [1.0, 0.0, 2.2] + [0.0, 2.1, 1.2] + ] + + + Example 2: + :: + + data = [[1.0, 2.0, 3.0, 4.0, 5.0]] + indices = [[1, 3]] + updates = [[1.1, 2.1]] + axis = 1 + output = [[1.0, 1.1, 3.0, 2.1, 5.0]] + + + + + Args: + data: (differentiable) Tensor of rank r >= 1. + + indices: (non-differentiable) Tensor of int32/int64 indices, of r >= 1 (same + rank as input). All index values are expected to be within bounds [-s, + s-1] along axis of size s. It is an error if any of the index values are + out of bounds. + + updates: (differentiable) Tensor of rank r >=1 (same rank and shape as + indices) + + axis: Which axis to scatter on. Negative value means counting dimensions + from the back. Accepted range is [-r, r-1] where r = rank(data). + + reduction: Type of reduction to apply: none (default), add, mul. 'none': no + reduction applied. 'add': reduction using the addition operation. + 'mul': reduction using the multiplication operation. + """ + + schema = get_schema("ScatterElements", 16, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "ScatterElements", schema) + return op( + *self._prepare_inputs(schema, data, indices, updates), + axis=axis, + reduction=reduction, + ) + + def ScatterND( + self, + data: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + indices: INT64, + updates: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + reduction: str = "none", + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 ScatterND(16)](https://onnx.ai/onnx/operators/onnx__ScatterND.html#scatternd-16 "Online Documentation") + + + ScatterND takes three inputs `data` tensor of rank r >= 1, `indices` tensor of rank q >= 1, + and `updates` tensor of rank q + r - indices.shape[-1] - 1. The output of the operation + is produced by creating a copy of the input `data`, and then updating its value to values + specified by `updates` at specific index positions specified by `indices`. Its output shape + is the same as the shape of `data`. + + `indices` is an integer tensor. 
Let k denote indices.shape[-1], the last dimension in the shape of `indices`. + `indices` is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into `data`. + Hence, k can be a value at most the rank of `data`. When k equals rank(data), each update entry specifies an + update to a single element of the tensor. When k is less than rank(data) each update entry specifies an + update to a slice of the tensor. Index values are allowed to be negative, as per the usual + convention for counting backwards from the end, but are expected in the valid range. + + `updates` is treated as a (q-1)-dimensional tensor of replacement-slice-values. Thus, the + first (q-1) dimensions of updates.shape must match the first (q-1) dimensions of indices.shape. + The remaining dimensions of `updates` correspond to the dimensions of the + replacement-slice-values. Each replacement-slice-value is a (r-k) dimensional tensor, + corresponding to the trailing (r-k) dimensions of `data`. Thus, the shape of `updates` + must equal indices.shape[0:q-1] ++ data.shape[k:r-1], where ++ denotes the concatenation + of shapes. + + The `output` is calculated via the following equation: + output = np.copy(data) + update_indices = indices.shape[:-1] + for idx in np.ndindex(update_indices): + output[indices[idx]] = updates[idx] + The order of iteration in the above loop is not specified. + In particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2]. + This ensures that the output value does not depend on the iteration order. + + `reduction` allows specification of an optional reduction operation, which is applied to all values in `updates` + tensor into `output` at the specified `indices`. + In cases where `reduction` is set to "none", indices should not have duplicate entries: that is, if idx1 != idx2, + then indices[idx1] != indices[idx2]. This ensures that the output value does not depend on the iteration order. + When `reduction` is set to "add", `output` is calculated as follows: + output = np.copy(data) + update_indices = indices.shape[:-1] + for idx in np.ndindex(update_indices): + output[indices[idx]] += updates[idx] + When `reduction` is set to "mul", `output` is calculated as follows: + output = np.copy(data) + update_indices = indices.shape[:-1] + for idx in np.ndindex(update_indices): + output[indices[idx]] *= updates[idx] + This operator is the inverse of GatherND. + Example 1: + :: + + data = [1, 2, 3, 4, 5, 6, 7, 8] + indices = [[4], [3], [1], [7]] + updates = [9, 10, 11, 12] + output = [1, 11, 3, 10, 9, 6, 7, 12] + + + Example 2: + :: + + data = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], + [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], + [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]], + [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]] + indices = [[0], [2]] + updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]] + output = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], + [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]], + [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]] + + + + + Args: + data: (differentiable) Tensor of rank r >= 1. + + indices: (non-differentiable) Tensor of rank q >= 1. + + updates: (differentiable) Tensor of rank q + r - indices_shape[-1] - 1. 
+ + reduction: Type of reduction to apply: none (default), add, mul. 'none': no + reduction applied. 'add': reduction using the addition operation. + 'mul': reduction using the multiplication operation. + """ + + schema = get_schema("ScatterND", 16, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "ScatterND", schema) + return op(*self._prepare_inputs(schema, data, indices, updates), reduction=reduction) + + def Where( + self, + condition: BOOL, + X: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + Y: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Where(16)](https://onnx.ai/onnx/operators/onnx__Where.html#where-16 "Online Documentation") + + + Return elements, either from X or Y, depending on condition. + Where behaves like + [numpy.where](https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html) + with three parameters. + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + **History** + - Version 16 adds bfloat16 to the types allowed (for the second and third parameter). + + + Args: + condition: (non-differentiable) When True (nonzero), yield X, otherwise + yield Y + + X: (differentiable) values selected at indices where condition is True + + Y: (differentiable) values selected at indices where condition is False + """ + + schema = get_schema("Where", 16, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Where", schema) + return op(*self._prepare_inputs(schema, condition, X, Y)) diff --git a/onnxscript/onnx_opset/_impl/opset17.py b/onnxscript/onnx_opset/_impl/opset17.py new file mode 100644 index 0000000000..397317230d --- /dev/null +++ b/onnxscript/onnx_opset/_impl/opset17.py @@ -0,0 +1,622 @@ +# -------------------------------------------------------------------------- +# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ +# ⚙️ Generated by 'python -m opgen' +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+# -------------------------------------------------------------------------- +# flake8: noqa +# mypy: disable-error-code=override +# pylint: disable=W0221,W0222,W0237,W0246,R0901 +# -------------------------------------------------------------------------- + +from typing import Callable, Optional, Sequence, Tuple, Union + +from onnx import GraphProto +from onnx.defs import get_schema + +from onnxscript.onnx_opset._impl.opset16 import Opset16 +from onnxscript.onnx_types import ( + BFLOAT16, + BOOL, + COMPLEX64, + COMPLEX128, + DOUBLE, + FLOAT, + FLOAT16, + INT8, + INT16, + INT32, + INT64, + STRING, + UINT8, + UINT16, + UINT32, + UINT64, +) +from onnxscript.values import Op, Opset + + +class Opset17(Opset16): + def __new__(cls): + return Opset.__new__(cls, "", 17) + + def __init__(self): + super().__init__() + + def BlackmanWindow( + self, size: Union[INT32, INT64], output_datatype: int = 1, periodic: int = 1 + ) -> Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 BlackmanWindow(17)](https://onnx.ai/onnx/operators/onnx__BlackmanWindow.html#blackmanwindow-17 "Online Documentation") + + + Generates a Blackman window as described in the paper https://ieeexplore.ieee.org/document/1455106. + + + Args: + size: (non-differentiable) A scalar value indicating the length of the + window. + + output_datatype: The data type of the output tensor. Strictly must be one of + the values from DataType enum in TensorProto whose values correspond to + T2. The default value is 1 = FLOAT. + + periodic: If 1, returns a window to be used as periodic function. If 0, + return a symmetric window. When 'periodic' is specified, hann computes a + window of length size + 1 and returns the first size points. The default + value is 1. + """ + + schema = get_schema("BlackmanWindow", 17, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "BlackmanWindow", schema) + return op( + *self._prepare_inputs(schema, size), + output_datatype=output_datatype, + periodic=periodic, + ) + + def DFT( + self, + input: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + dft_length: Optional[Union[INT32, INT64]] = None, + axis: int = 1, + inverse: int = 0, + onesided: int = 0, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 DFT(17)](https://onnx.ai/onnx/operators/onnx__DFT.html#dft-17 "Online Documentation") + + Computes the discrete Fourier transform of input. + + Args: + input: (non-differentiable) For real input, the following shape is expected: + [batch_idx][signal_dim1][signal_dim2]...[signal_dimN][1]. For complex + input, the following shape is expected: + [batch_idx][signal_dim1][signal_dim2]...[signal_dimN][2]. The first + dimension is the batch dimension. The following N dimentions correspond + to the signal's dimensions. The final dimension represents the real and + imaginary parts of the value in that order. + + dft_length: (optional, non-differentiable) The length of the signal.If + greater than the axis dimension, the signal will be zero-padded up to + dft_length. If less than the axis dimension, only the first dft_length + values will be used as the signal. It's an optional value. + + axis: The axis on which to perform the DFT. By default this value is set to + 1, which corresponds to the first dimension after the batch index. + + inverse: Whether to perform the inverse discrete fourier transform. 
+                By default this value is set to 0, which corresponds to false.
+
+            onesided: If onesided is 1, only values for w in [0, 1, 2, ...,
+                floor(n_fft/2) + 1] are returned because the real-to-complex Fourier
+                transform satisfies the conjugate symmetry, i.e., X[m, w] =
+                X[m, n_fft - w]*. Note if the input or window tensors are complex,
+                then onesided output is not possible. Enabling onesided with real inputs
+                performs a Real-valued fast Fourier transform (RFFT). When invoked with
+                real or complex valued input, the default value is 0. Values can be 0 or
+                1.
+            """
+
+        schema = get_schema("DFT", 17, "")
+        op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = Op(self, "DFT", schema)
+        return op(
+            *self._prepare_inputs(schema, input, dft_length),
+            axis=axis,
+            inverse=inverse,
+            onesided=onesided,
+        )
+
+    def HammingWindow(
+        self, size: Union[INT32, INT64], output_datatype: int = 1, periodic: int = 1
+    ) -> Union[
+        BFLOAT16,
+        DOUBLE,
+        FLOAT,
+        FLOAT16,
+        INT16,
+        INT32,
+        INT64,
+        INT8,
+        UINT16,
+        UINT32,
+        UINT64,
+        UINT8,
+    ]:
+        r"""[🌐 HammingWindow(17)](https://onnx.ai/onnx/operators/onnx__HammingWindow.html#hammingwindow-17 "Online Documentation")
+
+
+        Generates a Hamming window as described in the paper https://ieeexplore.ieee.org/document/1455106.
+
+
+        Args:
+            size: (non-differentiable) A scalar value indicating the length of the
+                window.
+
+            output_datatype: The data type of the output tensor. Strictly must be one of
+                the values from DataType enum in TensorProto whose values correspond to
+                T2. The default value is 1 = FLOAT.
+
+            periodic: If 1, returns a window to be used as a periodic function. If 0,
+                returns a symmetric window. When 'periodic' is specified, the operator
+                computes a window of length size + 1 and returns the first size points.
+                The default value is 1.
+        """
+
+        schema = get_schema("HammingWindow", 17, "")
+        op: Callable[
+            ...,
+            Union[
+                BFLOAT16,
+                DOUBLE,
+                FLOAT,
+                FLOAT16,
+                INT16,
+                INT32,
+                INT64,
+                INT8,
+                UINT16,
+                UINT32,
+                UINT64,
+                UINT8,
+            ],
+        ] = Op(self, "HammingWindow", schema)
+        return op(
+            *self._prepare_inputs(schema, size),
+            output_datatype=output_datatype,
+            periodic=periodic,
+        )
+
+    def HannWindow(
+        self, size: Union[INT32, INT64], output_datatype: int = 1, periodic: int = 1
+    ) -> Union[
+        BFLOAT16,
+        DOUBLE,
+        FLOAT,
+        FLOAT16,
+        INT16,
+        INT32,
+        INT64,
+        INT8,
+        UINT16,
+        UINT32,
+        UINT64,
+        UINT8,
+    ]:
+        r"""[🌐 HannWindow(17)](https://onnx.ai/onnx/operators/onnx__HannWindow.html#hannwindow-17 "Online Documentation")
+
+
+        Generates a Hann window as described in the paper https://ieeexplore.ieee.org/document/1455106.
+
+
+        Args:
+            size: (non-differentiable) A scalar value indicating the length of the
+                window.
+
+            output_datatype: The data type of the output tensor. Strictly must be one of
+                the values from DataType enum in TensorProto whose values correspond to
+                T2. The default value is 1 = FLOAT.
+
+            periodic: If 1, returns a window to be used as a periodic function. If 0,
+                returns a symmetric window. When 'periodic' is specified, the operator
+                computes a window of length size + 1 and returns the first size points.
+                The default value is 1.
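+
+        *Sample equivalent NumPy code* (a minimal sketch assuming the standard
+        Hann cosine formula; not taken from the ONNX reference implementation):
+
+        ::
+
+            import numpy as np
+
+            def hann_window(size, periodic=True, dtype=np.float32):
+                n = np.arange(size, dtype=np.float64)
+                # periodic: dividing by size (rather than size - 1) is equivalent to
+                # computing a length size + 1 window and keeping the first size points
+                denom = size if periodic else size - 1
+                return (0.5 - 0.5 * np.cos(2.0 * np.pi * n / denom)).astype(dtype)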
+ """ + + schema = get_schema("HannWindow", 17, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "HannWindow", schema) + return op( + *self._prepare_inputs(schema, size), + output_datatype=output_datatype, + periodic=periodic, + ) + + def LayerNormalization( + self, + X: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + Scale: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + B: Optional[Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = None, + axis: int = -1, + epsilon: float = 9.999999747378752e-06, + stash_type: int = 1, + ) -> Tuple[ + Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], Union[BFLOAT16, FLOAT], Union[BFLOAT16, FLOAT] + ]: + r"""[🌐 LayerNormalization(17)](https://onnx.ai/onnx/operators/onnx__LayerNormalization.html#layernormalization-17 "Online Documentation") + + + This is layer normalization defined in ONNX as function. + The overall computation can be split into two stages. + The first stage is standardization, which makes the + normalized elements have zero mean and unit variances. + The computation required by standardization can be + described by the following equations. + ``` + Mean = ReduceMean(X) + D = Sub(X, Mean) + DD = Mul(D, D) + Var = ReduceMean(DD) + VarEps = Add(Var, epsilon) + StdDev = Sqrt(VarEps) + InvStdDev = Reciprocal(StdDev) + Normalized = Mul(D, InvStdDev) + ``` + where `normalized_axes` is `[axis, ..., rank of X - 1]`. + The variables `Var` and `StdDev` stand for variance and + standard deviation, respectively. The second output is + `Mean` and the last one is `InvStdDev`. + Depending on `stash_type` attribute, the actual computation + must happen in different floating-point precision. + For example, if `stash_type` is 1, this operator casts + all input variables to 32-bit float, perform the computation, and + finally cast `Normalized` back to the original type of `X`. + The second stage then scales and shifts the outcome of the + first stage using + ``` + NormalizedScaled = Mul(Normalized, Scale) + Y = Add(NormalizedScaled, B) + ``` + The second stage doesn't depends on `stash_type`. + All equations are in [this syntax](https://github.com/onnx/onnx/blob/main/docs/Syntax.md). + The same variable (i.e., input, output, and attribute) uses + the same name in the equations above and this operator's definition. + Let `d[i]` indicate the i-th dimension of `X`. + If `X`'s shape is `[d[0], ..., d[axis-1], d[axis], ..., d[rank-1]]`, + the shape of `Mean` and `InvStdDev` is `[d[0], ..., d[axis-1], 1, ..., 1]`. + `Y` and `X` have the same shape. + + + Args: + X: Tensor to be normalized. + + Scale: Scale tensor. + + B: (optional) Bias tensor. + + axis: The first normalization dimension. If rank(X) is r, axis' allowed + range is [-r, r]. Negative value means counting dimensions from the + back. + + epsilon: The epsilon value to use to avoid division by zero. + + stash_type: Type of Mean and InvStdDev. This also specifies stage one's + computation precision. 
+ """ + + schema = get_schema("LayerNormalization", 17, "") + op: Callable[ + ..., + Tuple[ + Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + Union[BFLOAT16, FLOAT], + Union[BFLOAT16, FLOAT], + ], + ] = Op(self, "LayerNormalization", schema) + return op( + *self._prepare_inputs(schema, X, Scale, B), + axis=axis, + epsilon=epsilon, + stash_type=stash_type, + ) + + def MelWeightMatrix( + self, + num_mel_bins: Union[INT32, INT64], + dft_length: Union[INT32, INT64], + sample_rate: Union[INT32, INT64], + lower_edge_hertz: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + upper_edge_hertz: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + output_datatype: int = 1, + ) -> Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 MelWeightMatrix(17)](https://onnx.ai/onnx/operators/onnx__MelWeightMatrix.html#melweightmatrix-17 "Online Documentation") + + + Generate a MelWeightMatrix that can be used to re-weight a Tensor containing a linearly sampled frequency spectra (from DFT or STFT) into num_mel_bins frequency information based on the [lower_edge_hertz, upper_edge_hertz] range on the mel scale. + This function defines the mel scale in terms of a frequency in hertz according to the following formula: + + mel(f) = 2595 * log10(1 + f/700) + + In the returned matrix, all the triangles (filterbanks) have a peak value of 1.0. + + The returned MelWeightMatrix can be used to right-multiply a spectrogram S of shape [frames, num_spectrogram_bins] of linear scale spectrum values (e.g. STFT magnitudes) to generate a "mel spectrogram" M of shape [frames, num_mel_bins]. + + + Args: + num_mel_bins: (non-differentiable) The number of bands in the mel spectrum. + + dft_length: (non-differentiable) The size of the original DFT. The size of + the original DFT is used to infer the size of the onesided DFT, which is + understood to be floor(dft_length/2) + 1, i.e. the spectrogram only + contains the nonredundant DFT bins. + + sample_rate: (non-differentiable) Samples per second of the input signal + used to create the spectrogram. Used to figure out the frequencies + corresponding to each spectrogram bin, which dictates how they are + mapped into the mel scale. + + lower_edge_hertz: (non-differentiable) Lower bound on the frequencies to be + included in the mel spectrum. This corresponds to the lower edge of the + lowest triangular band. + + upper_edge_hertz: (non-differentiable) The desired top edge of the highest + frequency band. + + output_datatype: The data type of the output tensor. Strictly must be one of + the values from DataType enum in TensorProto whose values correspond to + T3. The default value is 1 = FLOAT. 
+ """ + + schema = get_schema("MelWeightMatrix", 17, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "MelWeightMatrix", schema) + return op( + *self._prepare_inputs( + schema, + num_mel_bins, + dft_length, + sample_rate, + lower_edge_hertz, + upper_edge_hertz, + ), + output_datatype=output_datatype, + ) + + def STFT( + self, + signal: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + frame_step: Union[INT32, INT64], + window: Optional[Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = None, + frame_length: Optional[Union[INT32, INT64]] = None, + onesided: int = 1, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 STFT(17)](https://onnx.ai/onnx/operators/onnx__STFT.html#stft-17 "Online Documentation") + + Computes the Short-time Fourier Transform of the signal. + + Args: + signal: (non-differentiable) Input tensor representing a real or complex + valued signal. For real input, the following shape is expected: + [batch_size][signal_length][1]. For complex input, the following shape + is expected: [batch_size][signal_length][2], where + [batch_size][signal_length][0] represents the real component and + [batch_size][signal_length][1] represents the imaginary component of the + signal. + + frame_step: (non-differentiable) The number of samples to step between + successive DFTs. + + window: (optional, non-differentiable) A tensor representing the window that + will be slid over the signal.The window must have rank 1 with shape: + [window_shape]. It's an optional value. + + frame_length: (optional, non-differentiable) A scalar representing the size + of the DFT. It's an optional value. + + onesided: If onesided is 1, only values for w in [0, 1, 2, ..., + floor(n_fft/2) + 1] are returned because the real-to-complex Fourier + transform satisfies the conjugate symmetry, i.e., X[m, w] = + X[m,w]=X[m,n_fft-w]*. Note if the input or window tensors are complex, + then onesided output is not possible. Enabling onesided with real inputs + performs a Real-valued fast Fourier transform (RFFT).When invoked with + real or complex valued input, the default value is 1. Values can be 0 or + 1. 
+ """ + + schema = get_schema("STFT", 17, "") + op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = Op(self, "STFT", schema) + return op( + *self._prepare_inputs(schema, signal, frame_step, window, frame_length), + onesided=onesided, + ) + + def SequenceMap( + self, + input_sequence: Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + ], + *additional_inputs: Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + body: Optional[GraphProto] = None, + ) -> Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + ]: + r"""[🌐 SequenceMap(17)](https://onnx.ai/onnx/operators/onnx__SequenceMap.html#sequencemap-17 "Online Documentation") + + + Applies a sub-graph to each sample in the input sequence(s). + + Inputs can be either tensors or sequences, with the exception of the first input which must + be a sequence. The length of the first input sequence will determine the number of samples in the + outputs. Any other sequence inputs should have the same number of samples. The number of inputs + and outputs, should match the one of the subgraph. + + For each i-th element in the output, a sample will be extracted from the input sequence(s) at + the i-th position and the sub-graph will be applied to it. + The outputs will contain the outputs of the sub-graph for each sample, in the same order as in + the input. + + This operator assumes that processing each sample is independent and could executed in parallel + or in any order. Users cannot expect any specific ordering in which each subgraph is computed. + + Args: + input_sequence: Input sequence. + + additional_inputs: (variadic, heterogeneous) Additional inputs to the graph + + body: The graph to be run for each sample in the sequence(s). It should have + as many inputs and outputs as inputs and outputs to the SequenceMap + function. 
+ """ + + schema = get_schema("SequenceMap", 17, "") + op: Callable[ + ..., + Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + ], + ] = Op(self, "SequenceMap", schema) + return op(*self._prepare_inputs(schema, input_sequence, *additional_inputs), body=body) diff --git a/onnxscript/onnx_opset/_impl/opset18.py b/onnxscript/onnx_opset/_impl/opset18.py new file mode 100644 index 0000000000..c568643608 --- /dev/null +++ b/onnxscript/onnx_opset/_impl/opset18.py @@ -0,0 +1,2040 @@ +# -------------------------------------------------------------------------- +# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ +# ⚙️ Generated by 'python -m opgen' +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +# flake8: noqa +# mypy: disable-error-code=override +# pylint: disable=W0221,W0222,W0237,W0246,R0901 +# -------------------------------------------------------------------------- + +from typing import Callable, Optional, Sequence, Union + +from onnx.defs import get_schema + +from onnxscript.onnx_opset._impl.opset17 import Opset17 +from onnxscript.onnx_types import ( + BFLOAT16, + BOOL, + COMPLEX64, + COMPLEX128, + DOUBLE, + FLOAT, + FLOAT16, + INT8, + INT16, + INT32, + INT64, + STRING, + UINT8, + UINT16, + UINT32, + UINT64, +) +from onnxscript.values import Op, Opset + + +class Opset18(Opset17): + def __new__(cls): + return Opset.__new__(cls, "", 18) + + def __init__(self): + super().__init__() + + def BitwiseAnd( + self, + A: Union[INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8], + B: Union[INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8], + ) -> Union[INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8]: + r"""[🌐 BitwiseAnd(18)](https://onnx.ai/onnx/operators/onnx__BitwiseAnd.html#bitwiseand-18 "Online Documentation") + + + Returns the tensor resulting from performing the bitwise `and` operation + elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: (non-differentiable) First input operand for the bitwise operator. + + B: (non-differentiable) Second input operand for the bitwise operator. + """ + + schema = get_schema("BitwiseAnd", 18, "") + op: Callable[ + ..., Union[INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8] + ] = Op(self, "BitwiseAnd", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def BitwiseNot( + self, X: Union[INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8] + ) -> Union[INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8]: + r"""[🌐 BitwiseNot(18)](https://onnx.ai/onnx/operators/onnx__BitwiseNot.html#bitwisenot-18 "Online Documentation") + + + Returns the bitwise not of the input tensor element-wise. 
+ + + Args: + X: (non-differentiable) Input tensor + """ + + schema = get_schema("BitwiseNot", 18, "") + op: Callable[ + ..., Union[INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8] + ] = Op(self, "BitwiseNot", schema) + return op(*self._prepare_inputs(schema, X)) + + def BitwiseOr( + self, + A: Union[INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8], + B: Union[INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8], + ) -> Union[INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8]: + r"""[🌐 BitwiseOr(18)](https://onnx.ai/onnx/operators/onnx__BitwiseOr.html#bitwiseor-18 "Online Documentation") + + + Returns the tensor resulting from performing the bitwise `or` operation + elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: (non-differentiable) First input operand for the bitwise operator. + + B: (non-differentiable) Second input operand for the bitwise operator. + """ + + schema = get_schema("BitwiseOr", 18, "") + op: Callable[ + ..., Union[INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8] + ] = Op(self, "BitwiseOr", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def BitwiseXor( + self, + A: Union[INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8], + B: Union[INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8], + ) -> Union[INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8]: + r"""[🌐 BitwiseXor(18)](https://onnx.ai/onnx/operators/onnx__BitwiseXor.html#bitwisexor-18 "Online Documentation") + + + Returns the tensor resulting from performing the bitwise `xor` operation + elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: (non-differentiable) First input operand for the bitwise operator. + + B: (non-differentiable) Second input operand for the bitwise operator. + """ + + schema = get_schema("BitwiseXor", 18, "") + op: Callable[ + ..., Union[INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8] + ] = Op(self, "BitwiseXor", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def CenterCropPad( + self, + input_data: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + shape: Union[INT32, INT64], + axes: Optional[Sequence[int]] = None, + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 CenterCropPad(18)](https://onnx.ai/onnx/operators/onnx__CenterCropPad.html#centercroppad-18 "Online Documentation") + + + Center crop or pad an input to given dimensions. + + The crop/pad dimensions can be specified for a subset of the `axes`. Non-specified dimensions will not be + cropped or padded. + + If the input dimensions are bigger than the crop shape, a centered cropping window is extracted from the input. + If the input dimensions are smaller than the crop shape, the input is padded on each side equally, + so that the input is centered in the output. + + + Args: + input_data: (differentiable) Input to extract the centered crop from. 
+ + shape: (non-differentiable) 1-D tensor representing the cropping window + dimensions. + + axes: If provided, it specifies a subset of axes that 'shape' refer to. If + not provided, all axes are assumed [0, 1, ..., r-1], where r = + rank(data). Negative value means counting dimensions from the back. + Accepted range is [-r, r-1], where r = rank(data). Behavior is undefined + if an axis is repeated. + """ + + schema = get_schema("CenterCropPad", 18, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "CenterCropPad", schema) + return op(*self._prepare_inputs(schema, input_data, shape), axes=axes) + + def Col2Im( + self, + input: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + image_shape: INT64, + block_shape: INT64, + dilations: Optional[Sequence[int]] = None, + pads: Optional[Sequence[int]] = None, + strides: Optional[Sequence[int]] = None, + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Col2Im(18)](https://onnx.ai/onnx/operators/onnx__Col2Im.html#col2im-18 "Online Documentation") + + + The operator rearranges column blocks back into a multidimensional image + + Col2Im behaves similarly to PyTorch's fold https://pytorch.org/docs/stable/generated/torch.nn.Fold.html, + but it only supports *batched* multi-dimensional image tensors. + Another implementation in Python with N-dimension support can be found at https://github.com/f-dangel/unfoldNd/. + + NOTE: Although specifying image_shape looks redundant because it could be calculated from + convolution formulas, it is required as input for more advanced scenarios as explained + at PyTorch's implementation (https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/Col2Im.cpp#L10) + + + + Args: + input: (differentiable) Input data tensor to be rearranged from column + blocks back into an image. This is a 3-dimensional tensor containing [N, + C * n-ary-product(block_shape), L], where N is batch dimension, C is + image channel dimension and L is number of blocks.The blocks are + enumerated in increasing lexicographic-order of their indices.For + example, with an image-size 10*20 and block-size 9*18, there would be + 2*3 blocks, enumerated in the order block(0, 0), block(0, 1), block(0, + 2), block(1, 0), block(1, 1), block(1, 2). + + image_shape: (non-differentiable) The shape of the spatial dimensions of the + image after rearranging the column blocks.This is a 1-dimensional tensor + with size of at least 2, containing the value [H_img, W_img] for a 2-D + image or [dim_i1, dim_i2, ..., dim_iN] for a N-D image. + + block_shape: (non-differentiable) The shape of the block to apply on the + input.This is a 1-dimensional tensor of size of at least 2, containing + the value [H_block, W_block] for a 2-D image or [dim_b1, dim_b2, ..., + dim_bN] for a N-D block.This is the block-shape before dilation is + applied to it. + + dilations: 1-dimensional tensor with dilation value along each spatial axis + of the image. If not present, the dilation defaults to 1 along each + spatial axis of the image. 
+ + pads: 1-dimensional tensor with padding value for the beginning and ending + along each spatial axis, it can take any value greater than or equal to + 0. The value represent the number of pixels added to the beginning and + end part of the corresponding axis. `pads` format should be as follow + [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number + of pixels added at the beginning of axis `i` and xi_end is the number of + pixels added at the end of axis `i`. If not present, the padding + defaults to 0 along start and end of each spatial axis. + + strides: 1-dimensional tensor with stride value along each spatial axis. If + not present, the stride defaults to 1 along each spatial axis. + """ + + schema = get_schema("Col2Im", 18, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Col2Im", schema) + return op( + *self._prepare_inputs(schema, input, image_shape, block_shape), + dilations=dilations, + pads=pads, + strides=strides, + ) + + def GroupNormalization( + self, + X: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + scale: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + bias: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16], + epsilon: float = 9.999999747378752e-06, + num_groups: Optional[int] = None, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 GroupNormalization(18)](https://onnx.ai/onnx/operators/onnx__GroupNormalization.html#groupnormalization-18 "Online Documentation") + + + A GroupNormalization function. Carries out group normalization as described in + the paper https://arxiv.org/abs/1803.08494 + + This operator transforms input according to + :: + + y = scale * (x - mean) / sqrt(variance + epsilon) + bias, + + + where the mean and variance are computed per instance per group of channels, and + `scale` and `bias` should be specified for each group of channels. The number of + groups `num_groups` should be divisible by the number of channels so that there are + an equal number of channels per group. + + When the number of groups is the same as the number of channels, this operator is + equivalent to InstanceNormalization. When there is only one group, this operator + is equivalent to LayerNormalization. + + + Args: + X: (differentiable) Input data tensor. Dimensions for image cases are `(N x + C x H x W)`, where `N` is the batch size, `C` is the number of channels, + and `H` and `W` are the height and width of the data. Statistics are + computed for every group of channels over `C`, `H`, and `W`. For + non-image cases, the dimensions are in the form of `(N x C x D1 x D2 ... + Dn)`. + + scale: (differentiable) Scale tensor of shape `(num_groups)`. + + bias: (differentiable) Bias tensor of shape `(num_groups)`. + + epsilon: The epsilon value to use to avoid division by zero. + + num_groups: The number of groups of channels. It should be a divisor of the + number of channels `C`. 
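+
+        *Sample equivalent NumPy code* (a minimal sketch; note that in this
+        opset-18 form `scale` and `bias` carry one value per group):
+
+        ::
+
+            import numpy as np
+
+            def group_norm(X, scale, bias, num_groups, epsilon=1e-5):
+                n = X.shape[0]
+                g = X.reshape(n, num_groups, -1)  # per instance, per group
+                mean = g.mean(axis=-1, keepdims=True)
+                var = g.var(axis=-1, keepdims=True)
+                g = (g - mean) / np.sqrt(var + epsilon)
+                g = g * scale.reshape(1, num_groups, 1) + bias.reshape(1, num_groups, 1)
+                return g.reshape(X.shape)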
+ """ + + schema = get_schema("GroupNormalization", 18, "") + op: Callable[..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16]] = Op( + self, "GroupNormalization", schema + ) + return op( + *self._prepare_inputs(schema, X, scale, bias), + epsilon=epsilon, + num_groups=num_groups, + ) + + def LpPool( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + auto_pad: str = "NOTSET", + ceil_mode: int = 0, + dilations: Optional[Sequence[int]] = None, + kernel_shape: Optional[Sequence[int]] = None, + p: int = 2, + pads: Optional[Sequence[int]] = None, + strides: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 LpPool(18)](https://onnx.ai/onnx/operators/onnx__LpPool.html#lppool-18 "Online Documentation") + + + LpPool consumes an input tensor X and applies Lp pooling across + the tensor according to kernel sizes, stride sizes, and pad lengths. + Lp pooling consisting of computing the Lp norm on all values of a subset + of the input tensor according to the kernel size and downsampling the + data into the output tensor Y for further processing. The output spatial shape will be following: + ``` + output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - {kernelSpatialShape}) / strides_spatial_shape[i] + 1) + ``` + or + ``` + output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - {kernelSpatialShape}) / strides_spatial_shape[i] + 1) + ``` + if ceil_mode is enabled + + ``` + * pad_shape[i] is sum of pads along axis i + ``` + + `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following: + ``` + VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - {kernelSpatialShape} + 1) / strides_spatial_shape[i]) + SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i]) + ``` + And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`: + ``` + pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + {kernelSpatialShape} - input_spatial_shape[i] + ``` + + Args: + X: (differentiable) Input data tensor from the previous operator; dimensions + for image case are (N x C x H x W), where N is the batch size, C is the + number of channels, and H and W are the height and the width of the + data. For non image case, the dimensions are in the form of (N x C x D1 + x D2 ... Dn), where N is the batch size. + + auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. + Where default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = + ceil(input_shape[i] / strides[i])` for each axis `i`. The padding is + split between the two sides equally or almost equally (depending on + whether it is even or odd). In case the padding is an odd number, the + extra padding is added at the end for SAME_UPPER and at the beginning + for SAME_LOWER. + + ceil_mode: Whether to use ceil or floor (default) to compute the output + shape. + + dilations: dilation value along each spatial axis of the filter. If not + present, the dilation defaults is 1 along each spatial axis. + + kernel_shape: The size of the kernel along each axis. + + p: p value of the Lp norm used to pool over the input data. + + pads: Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. 
`pads` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis `i` and xi_end, the number of pixels + added at the end of axis `i`. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. + + strides: Stride along each spatial axis. If not present, the stride defaults + to 1 along each spatial axis. + """ + + schema = get_schema("LpPool", 18, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "LpPool", schema) + return op( + *self._prepare_inputs(schema, X), + auto_pad=auto_pad, + ceil_mode=ceil_mode, + dilations=dilations, + kernel_shape=kernel_shape, + p=p, + pads=pads, + strides=strides, + ) + + def Mish(self, X: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Mish(18)](https://onnx.ai/onnx/operators/onnx__Mish.html#mish-18 "Online Documentation") + + + Mish: A Self Regularized Non-Monotonic Neural Activation Function. + + Perform the linear unit element-wise on the input tensor X using formula: + + :: + + mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^{x})) + + + + + Args: + X: (differentiable) Input tensor + """ + + schema = get_schema("Mish", 18, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Mish", schema) + return op(*self._prepare_inputs(schema, X)) + + def OptionalGetElement( + self, + input: Union[ + Optional[Sequence[BOOL]], + Optional[Sequence[COMPLEX128]], + Optional[Sequence[COMPLEX64]], + Optional[Sequence[DOUBLE]], + Optional[Sequence[FLOAT]], + Optional[Sequence[FLOAT16]], + Optional[Sequence[INT16]], + Optional[Sequence[INT32]], + Optional[Sequence[INT64]], + Optional[Sequence[INT8]], + Optional[Sequence[STRING]], + Optional[Sequence[UINT16]], + Optional[Sequence[UINT32]], + Optional[Sequence[UINT64]], + Optional[Sequence[UINT8]], + Optional[BOOL], + Optional[COMPLEX128], + Optional[COMPLEX64], + Optional[DOUBLE], + Optional[FLOAT], + Optional[FLOAT16], + Optional[INT16], + Optional[INT32], + Optional[INT64], + Optional[INT8], + Optional[STRING], + Optional[UINT16], + Optional[UINT32], + Optional[UINT64], + Optional[UINT8], + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 OptionalGetElement(18)](https://onnx.ai/onnx/operators/onnx__OptionalGetElement.html#optionalgetelement-18 "Online Documentation") + + + If the input is a tensor or sequence type, it returns the input. + If the input is an optional type, it outputs the element in the input. + It is an error if the input is an empty optional-type (i.e. 
does not have an element) and the behavior is undefined in this case. + + + Args: + input: The optional input. + """ + + schema = get_schema("OptionalGetElement", 18, "") + op: Callable[ + ..., + Union[ + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "OptionalGetElement", schema) + return op(*self._prepare_inputs(schema, input)) + + def OptionalHasElement( + self, + input: Optional[ + Union[ + Optional[Sequence[BOOL]], + Optional[Sequence[COMPLEX128]], + Optional[Sequence[COMPLEX64]], + Optional[Sequence[DOUBLE]], + Optional[Sequence[FLOAT]], + Optional[Sequence[FLOAT16]], + Optional[Sequence[INT16]], + Optional[Sequence[INT32]], + Optional[Sequence[INT64]], + Optional[Sequence[INT8]], + Optional[Sequence[STRING]], + Optional[Sequence[UINT16]], + Optional[Sequence[UINT32]], + Optional[Sequence[UINT64]], + Optional[Sequence[UINT8]], + Optional[BOOL], + Optional[COMPLEX128], + Optional[COMPLEX64], + Optional[DOUBLE], + Optional[FLOAT], + Optional[FLOAT16], + Optional[INT16], + Optional[INT32], + Optional[INT64], + Optional[INT8], + Optional[STRING], + Optional[UINT16], + Optional[UINT32], + Optional[UINT64], + Optional[UINT8], + Sequence[BOOL], + Sequence[COMPLEX128], + Sequence[COMPLEX64], + Sequence[DOUBLE], + Sequence[FLOAT], + Sequence[FLOAT16], + Sequence[INT16], + Sequence[INT32], + Sequence[INT64], + Sequence[INT8], + Sequence[STRING], + Sequence[UINT16], + Sequence[UINT32], + Sequence[UINT64], + Sequence[UINT8], + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ] + ] = None, + ) -> BOOL: + r"""[🌐 OptionalHasElement(18)](https://onnx.ai/onnx/operators/onnx__OptionalHasElement.html#optionalhaselement-18 "Online Documentation") + + + Returns true if (1) the input is an optional-type and contains an element, + or, (2) the input is a tensor or sequence type. + If the input is not provided or is an empty optional-type, this op returns false. + + + Args: + input: (optional) The optional input. 
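+
+        A schematic summary of the result (the call forms below are illustrative
+        shorthand, not runnable API):
+        ::
+
+            OptionalHasElement()                        -> False  # input absent
+            OptionalHasElement(<empty optional>)        -> False
+            OptionalHasElement(<optional with element>) -> True
+            OptionalHasElement(<tensor or sequence>)    -> True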
+ """ + + schema = get_schema("OptionalHasElement", 18, "") + op: Callable[..., BOOL] = Op(self, "OptionalHasElement", schema) + return op(*self._prepare_inputs(schema, input)) + + def Pad( + self, + data: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + pads: INT64, + constant_value: Optional[ + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ] + ] = None, + axes: Optional[Union[INT32, INT64]] = None, + mode: str = "constant", + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Pad(18)](https://onnx.ai/onnx/operators/onnx__Pad.html#pad-18 "Online Documentation") + + + Given a tensor containing the data to be padded (`data`), a tensor containing the number of start and end pad values for axis (`pads`), (optionally) a `mode`, and (optionally) `constant_value`, + a padded tensor (`output`) is generated. + + The three supported `modes` are (similar to corresponding modes supported by `numpy.pad`): + + 1) `constant`(default) - pads with a given constant value as specified by `constant_value` (which defaults to 0, empty string, or False) + + 2) `reflect` - pads with the reflection of the vector mirrored on the first and last values of the vector along each axis + + 3) `edge` - pads with the edge values of array + + + Example 1 (`constant` mode): + Insert 0 pads to the beginning of the second dimension. + + data = + [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] + + pads = [0, 2, 0, 0] + + mode = 'constant' + + constant_value = 0.0 + + output = + [ + [0.0, 0.0, 1.0, 1.2], + [0.0, 0.0, 2.3, 3.4], + [0.0, 0.0, 4.5, 5.7], + ] + + + Example 2 (`reflect` mode): + data = + [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] + + pads = [0, 2, 0, 0] + + mode = 'reflect' + + output = + [ + [1.0, 1.2, 1.0, 1.2], + [2.3, 3.4, 2.3, 3.4], + [4.5, 5.7, 4.5, 5.7], + ] + + + Example 3 (`edge` mode): + data = + [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] + + pads = [0, 2, 0, 0] + + mode = 'edge' + + output = + [ + [1.0, 1.0, 1.0, 1.2], + [2.3, 2.3, 2.3, 3.4], + [4.5, 4.5, 4.5, 5.7], + ] + + + + Args: + data: (differentiable) Input tensor. + + pads: (non-differentiable) Tensor of integers indicating the number of + padding elements to add or remove (if negative) at the beginning and end + of each axis. For 2D input tensor, it is the number of pixels. `pads` + should be a 1D tensor of shape [2 * num_axes] where `num_axes` refers to + the number of elements in the `axes` input or the input rank if `axes` + are not provided explicitly. `pads` format should be: [x1_begin, + x2_begin, ..., x1_end, x2_end,...], where xi_begin is the number of pad + values added at the beginning of axis `axes[i]` and xi_end, the number + of pad values added at the end of axis `axes[i]`. + + constant_value: (optional, non-differentiable) (Optional) A scalar value to + be used if the mode chosen is `constant` (by default it is 0, empty + string or False). + + axes: (optional, non-differentiable) 1-D tensor of axes that `pads` apply + to. Negative value means counting dimensions from the back. Accepted + range is [-r, r-1] where r = rank(data). Behavior is undefined if an + axis is repeated. If not provided, all axes are assumed (`[0, 1, ..., + input_rank-1]`). 
+ + mode: Supported modes: `constant`(default), `reflect`, `edge` + """ + + schema = get_schema("Pad", 18, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Pad", schema) + return op(*self._prepare_inputs(schema, data, pads, constant_value, axes), mode=mode) + + def ReduceL1( + self, + data: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axes: Optional[INT64] = None, + keepdims: int = 1, + noop_with_empty_axes: int = 0, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 ReduceL1(18)](https://onnx.ai/onnx/operators/onnx__ReduceL1.html#reducel1-18 "Online Documentation") + + + Computes the L1 norm of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then + the resulting tensor has the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: (differentiable) An input tensor. + + axes: (optional, non-differentiable) Optional input list of integers, along + which to reduce. The default is to reduce over all the dimensions of the + input tensor if 'noop_with_empty_axes' is false, else act as an Identity + op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] + where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + + noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior + with 'false' is to reduce all axes. When axes is empty and this + attribute is set to true, input tensor will not be reduced,and the + output tensor would be equivalent to input tensor. + """ + + schema = get_schema("ReduceL1", 18, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "ReduceL1", schema) + return op( + *self._prepare_inputs(schema, data, axes), + keepdims=keepdims, + noop_with_empty_axes=noop_with_empty_axes, + ) + + def ReduceL2( + self, + data: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axes: Optional[INT64] = None, + keepdims: int = 1, + noop_with_empty_axes: int = 0, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 ReduceL2(18)](https://onnx.ai/onnx/operators/onnx__ReduceL2.html#reducel2-18 "Online Documentation") + + + Computes the L2 norm of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then + the resulting tensor has the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: (differentiable) An input tensor. + + axes: (optional, non-differentiable) Optional input list of integers, along + which to reduce. The default is to reduce over all the dimensions of the + input tensor if 'noop_with_empty_axes' is false, else act as an Identity + op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] + where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + + noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior + with 'false' is to reduce all axes. 
When axes is empty and this + attribute is set to true, input tensor will not be reduced,and the + output tensor would be equivalent to input tensor. + """ + + schema = get_schema("ReduceL2", 18, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "ReduceL2", schema) + return op( + *self._prepare_inputs(schema, data, axes), + keepdims=keepdims, + noop_with_empty_axes=noop_with_empty_axes, + ) + + def ReduceLogSum( + self, + data: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axes: Optional[INT64] = None, + keepdims: int = 1, + noop_with_empty_axes: int = 0, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 ReduceLogSum(18)](https://onnx.ai/onnx/operators/onnx__ReduceLogSum.html#reducelogsum-18 "Online Documentation") + + + Computes the log sum of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then + the resulting tensor has the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: (differentiable) An input tensor. + + axes: (optional, non-differentiable) Optional input list of integers, along + which to reduce. The default is to reduce over all the dimensions of the + input tensor if 'noop_with_empty_axes' is false, else act as an Identity + op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] + where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + + noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior + with 'false' is to reduce all axes. When axes is empty and this + attribute is set to true, input tensor will not be reduced,and the + output tensor would be equivalent to input tensor. + """ + + schema = get_schema("ReduceLogSum", 18, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "ReduceLogSum", schema) + return op( + *self._prepare_inputs(schema, data, axes), + keepdims=keepdims, + noop_with_empty_axes=noop_with_empty_axes, + ) + + def ReduceLogSumExp( + self, + data: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axes: Optional[INT64] = None, + keepdims: int = 1, + noop_with_empty_axes: int = 0, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 ReduceLogSumExp(18)](https://onnx.ai/onnx/operators/onnx__ReduceLogSumExp.html#reducelogsumexp-18 "Online Documentation") + + + Computes the log sum exponent of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then + the resulting tensor has the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: (differentiable) An input tensor. + + axes: (optional, non-differentiable) Optional input list of integers, along + which to reduce. The default is to reduce over all the dimensions of the + input tensor if 'noop_with_empty_axes' is false, else act as an Identity + op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] + where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. 
+ + noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior + with 'false' is to reduce all axes. When axes is empty and this + attribute is set to true, input tensor will not be reduced,and the + output tensor would be equivalent to input tensor. + """ + + schema = get_schema("ReduceLogSumExp", 18, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "ReduceLogSumExp", schema) + return op( + *self._prepare_inputs(schema, data, axes), + keepdims=keepdims, + noop_with_empty_axes=noop_with_empty_axes, + ) + + def ReduceMax( + self, + data: Union[ + BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, INT8, UINT32, UINT64, UINT8 + ], + axes: Optional[INT64] = None, + keepdims: int = 1, + noop_with_empty_axes: int = 0, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, INT8, UINT32, UINT64, UINT8]: + r"""[🌐 ReduceMax(18)](https://onnx.ai/onnx/operators/onnx__ReduceMax.html#reducemax-18 "Online Documentation") + + + Computes the max of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then + the resulting tensor has the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: (differentiable) An input tensor. + + axes: (optional, non-differentiable) Optional input list of integers, along + which to reduce. The default is to reduce over all the dimensions of the + input tensor if 'noop_with_empty_axes' is false, else act as an Identity + op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] + where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + + noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior + with 'false' is to reduce all axes. When axes is empty and this + attribute is set to true, input tensor will not be reduced,and the + output tensor would be equivalent to input tensor. + """ + + schema = get_schema("ReduceMax", 18, "") + op: Callable[ + ..., + Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, INT8, UINT32, UINT64, UINT8], + ] = Op(self, "ReduceMax", schema) + return op( + *self._prepare_inputs(schema, data, axes), + keepdims=keepdims, + noop_with_empty_axes=noop_with_empty_axes, + ) + + def ReduceMean( + self, + data: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axes: Optional[INT64] = None, + keepdims: int = 1, + noop_with_empty_axes: int = 0, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 ReduceMean(18)](https://onnx.ai/onnx/operators/onnx__ReduceMean.html#reducemean-18 "Online Documentation") + + + Computes the mean of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then + the resulting tensor has the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: (differentiable) An input tensor. + + axes: (optional, non-differentiable) Optional input list of integers, along + which to reduce. The default is to reduce over all the dimensions of the + input tensor if 'noop_with_empty_axes' is false, else act as an Identity + op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] + where r = rank(data). 
+ + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + + noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior + with 'false' is to reduce all axes. When axes is empty and this + attribute is set to true, input tensor will not be reduced,and the + output tensor would be equivalent to input tensor. + """ + + schema = get_schema("ReduceMean", 18, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "ReduceMean", schema) + return op( + *self._prepare_inputs(schema, data, axes), + keepdims=keepdims, + noop_with_empty_axes=noop_with_empty_axes, + ) + + def ReduceMin( + self, + data: Union[ + BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, INT8, UINT32, UINT64, UINT8 + ], + axes: Optional[INT64] = None, + keepdims: int = 1, + noop_with_empty_axes: int = 0, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, INT8, UINT32, UINT64, UINT8]: + r"""[🌐 ReduceMin(18)](https://onnx.ai/onnx/operators/onnx__ReduceMin.html#reducemin-18 "Online Documentation") + + + Computes the min of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then + the resulting tensor has the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: (differentiable) An input tensor. + + axes: (optional, non-differentiable) Optional input list of integers, along + which to reduce. The default is to reduce over all the dimensions of the + input tensor if 'noop_with_empty_axes' is false, else act as an Identity + op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] + where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + + noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior + with 'false' is to reduce all axes. When axes is empty and this + attribute is set to true, input tensor will not be reduced,and the + output tensor would be equivalent to input tensor. + """ + + schema = get_schema("ReduceMin", 18, "") + op: Callable[ + ..., + Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, INT8, UINT32, UINT64, UINT8], + ] = Op(self, "ReduceMin", schema) + return op( + *self._prepare_inputs(schema, data, axes), + keepdims=keepdims, + noop_with_empty_axes=noop_with_empty_axes, + ) + + def ReduceProd( + self, + data: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axes: Optional[INT64] = None, + keepdims: int = 1, + noop_with_empty_axes: int = 0, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 ReduceProd(18)](https://onnx.ai/onnx/operators/onnx__ReduceProd.html#reduceprod-18 "Online Documentation") + + + Computes the product of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then + the resulting tensor has the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: (differentiable) An input tensor. + + axes: (optional, non-differentiable) Optional input list of integers, along + which to reduce. 
The default is to reduce over all the dimensions of the + input tensor if 'noop_with_empty_axes' is false, else act as an Identity + op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] + where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + + noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior + with 'false' is to reduce all axes. When axes is empty and this + attribute is set to true, input tensor will not be reduced,and the + output tensor would be equivalent to input tensor. + """ + + schema = get_schema("ReduceProd", 18, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "ReduceProd", schema) + return op( + *self._prepare_inputs(schema, data, axes), + keepdims=keepdims, + noop_with_empty_axes=noop_with_empty_axes, + ) + + def ReduceSumSquare( + self, + data: Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axes: Optional[INT64] = None, + keepdims: int = 1, + noop_with_empty_axes: int = 0, + ) -> Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 ReduceSumSquare(18)](https://onnx.ai/onnx/operators/onnx__ReduceSumSquare.html#reducesumsquare-18 "Online Documentation") + + + Computes the sum square of the input tensor's element along the provided axes. The resulting + tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then + the resulting tensor has the reduced dimension pruned. + + The above behavior is similar to numpy, with the exception that numpy defaults keepdims to + False instead of True. + + Args: + data: (differentiable) An input tensor. + + axes: (optional, non-differentiable) Optional input list of integers, along + which to reduce. The default is to reduce over all the dimensions of the + input tensor if 'noop_with_empty_axes' is false, else act as an Identity + op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] + where r = rank(data). + + keepdims: Keep the reduced dimension or not, default 1 means keep reduced + dimension. + + noop_with_empty_axes: Defines behavior if 'axes' is empty. Default behavior + with 'false' is to reduce all axes. When axes is empty and this + attribute is set to true, input tensor will not be reduced,and the + output tensor would be equivalent to input tensor. 
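+
+        A NumPy reference sketch (illustrative values; assumes the default
+        `keepdims=1`):
+        ::
+
+            import numpy as np
+
+            data = np.array([[1.0, 2.0], [3.0, 4.0]])
+            out = np.sum(np.square(data), axis=1, keepdims=True)
+            # out == [[ 5.0],    # 1**2 + 2**2
+            #         [25.0]]    # 3**2 + 4**2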
+ """ + + schema = get_schema("ReduceSumSquare", 18, "") + op: Callable[ + ..., Union[BFLOAT16, DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64] + ] = Op(self, "ReduceSumSquare", schema) + return op( + *self._prepare_inputs(schema, data, axes), + keepdims=keepdims, + noop_with_empty_axes=noop_with_empty_axes, + ) + + def Resize( + self, + X: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + roi: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + scales: Optional[FLOAT] = None, + sizes: Optional[INT64] = None, + antialias: int = 0, + axes: Optional[Sequence[int]] = None, + coordinate_transformation_mode: str = "half_pixel", + cubic_coeff_a: float = -0.75, + exclude_outside: int = 0, + extrapolation_value: float = 0.0, + keep_aspect_ratio_policy: str = "stretch", + mode: str = "nearest", + nearest_mode: str = "round_prefer_floor", + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Resize(18)](https://onnx.ai/onnx/operators/onnx__Resize.html#resize-18 "Online Documentation") + + + Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood (a.k.a. sampling locations) in the input tensor. + Each dimension value of the output tensor is:
+ `output_dimension = floor(input_dimension * (roi_end - roi_start) * scale)`
+ if input \"sizes\" is not specified. + + + Args: + X: (differentiable) N-D tensor + + roi: (optional, non-differentiable) 1-D tensor given as [start1, ..., + startN, end1, ..., endN], where N is the rank of X or the length of + axes, if provided. The RoIs' coordinates are normalized in the + coordinate system of the input image. It only takes effect when + coordinate_transformation_mode is "tf_crop_and_resize" + + scales: (optional, non-differentiable) The scale array along each dimension. + It takes value greater than 0. If it's less than 1, it's sampling down, + otherwise, it's upsampling. The number of elements of 'scales' should be + the same as the rank of input 'X' or the length of 'axes', if provided. + One of 'scales' and 'sizes' MUST be specified and it is an error if both + are specified. If 'sizes' is needed, the user can use an empty string as + the name of 'scales' in this operator's input list. + + sizes: (optional, non-differentiable) Target size of the output tensor. Its + interpretation depends on the 'keep_aspect_ratio_policy' value.The + number of elements of 'sizes' should be the same as the rank of input + 'X', or the length of 'axes', if provided. Only one of 'scales' and + 'sizes' can be specified. + + antialias: If set to 1, "linear" and "cubic" interpolation modes will use an + antialiasing filter when downscaling. Antialiasing is achieved by + stretching the resampling filter by a factor max(1, 1 / scale), which + means that when downsampling, more input pixels contribute to an output + pixel. + + axes: If provided, it specifies a subset of axes that 'roi', 'scales' and + 'sizes' refer to. If not provided, all axes are assumed [0, 1, ..., + r-1], where r = rank(data). Non-specified dimensions are interpreted as + non-resizable. Negative value means counting dimensions from the back. + Accepted range is [-r, r-1], where r = rank(data). Behavior is undefined + if an axis is repeated. + + coordinate_transformation_mode: + This attribute describes how to transform + the coordinate in the resized tensor to the coordinate in the original + tensor.
+ + The coordinate of each dimension is transformed + individually. Let's describe a case using axis x as an example. + Denote + x_resized as the coordinate of axis x in the resized tensor, x_original + as the coordinate of axis x in the original tensor, `length_original` as + the length of the original tensor in axis x, length_resized as the + length of the resized tensor in axis x, roi_x = (start_x, end_x) of the + axis x in input "roi", `scale = length_resized / length_original`,
+ if coordinate_transformation_mode is `"half_pixel"`,
+ `x_original = + (x_resized + 0.5) / scale - 0.5`
+ + if + coordinate_transformation_mode is `"pytorch_half_pixel"`,
+ `x_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0` +
+ + if coordinate_transformation_mode is `"align_corners"`,
+ `x_original = x_resized * (length_original - 1) / (length_resized - 1)` +
+ + if coordinate_transformation_mode is `"asymmetric"`,
+ `x_original = x_resized / scale`
+ + if + coordinate_transformation_mode is `"tf_crop_and_resize"`,
+ `x_original = length_resized > 1 ? start_x * (length_original - 1) + + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized + - 1) : 0.5 * (start_x + end_x) * (length_original - 1)` + . + + cubic_coeff_a: The coefficient 'a' used in cubic interpolation. Two common + choice are -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch). + Check out Equation (4) in https://ieeexplore.ieee.org/document/1163711 + for the details. This attribute is valid only if mode is "cubic". + + exclude_outside: If set to 1, the weight of sampling locations outside the + tensor will be set to 0 and the weight will be renormalized so that + their sum is 1.0. The default value is 0. + + extrapolation_value: When coordinate_transformation_mode is + "tf_crop_and_resize" and x_original is outside the range [0, + length_original - 1], this value is used as the corresponding output + value. Default is 0.0f. + + keep_aspect_ratio_policy: + This attribute describes how to interpret the + `sizes` input with regard to keeping the original aspect ratio of the + input, and it is not applicable when + the `scales` input is used.
+ Given a set of `sizes`, associated with a subset of `axes` (explicitly + provided or default), and assuming `d = axes[i]`, with `i` being the + index of the provided `sizes`.
+ + If `keep_aspect_ratio_policy` is + `"stretch"`, the original aspect ratio is disregarded, and the input is + resized to the specified size:
+ `out_size[d] = sizes[i]`
+ + If + `keep_aspect_ratio_policy` is `"not_larger"`, the sizes are adjusted so + that no extent of the output is larger than the specified size, while + keeping the original aspect ratio:
+ `scale = Min(sizes[i] / + in_size[d])`
+ `out_size[d] = round_int(scale * in_size[i])`
+ If `keep_aspect_ratio_policy` is `"not_smaller"`, the sizes are adjusted + so that no extent of the output is smaller than the specified size, + while keeping the original aspect ratio:
+ `scale = Max(sizes[i] / + in_size[d])`
+ `out_size[d] = round_int(scale * in_size[i])`
+ For non-resizable axes (those not specified in `axes`), the output size + will be equal to the input size. + + Note: `round_int` stands for computing + the nearest integer value, rounding halfway cases up. + + mode: Three interpolation modes: "nearest" (default), "linear" and "cubic". + The "linear" mode includes linear interpolation for 1D tensor and + N-linear interpolation for N-D tensor (for example, bilinear + interpolation for 2D tensor). The "cubic" mode includes cubic + interpolation for 1D tensor and N-cubic interpolation for N-D tensor + (for example, bicubic interpolation for 2D tensor). + + nearest_mode: Four modes: "round_prefer_floor" (default, as known as round + half down), "round_prefer_ceil" (as known as round half up), "floor", + "ceil". Only used by nearest interpolation. It indicates how to get + "nearest" pixel in input tensor from x_original, so this attribute is + valid only if "mode" is "nearest". + """ + + schema = get_schema("Resize", 18, "") + op: Callable[ + ..., + Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Resize", schema) + return op( + *self._prepare_inputs(schema, X, roi, scales, sizes), + antialias=antialias, + axes=axes, + coordinate_transformation_mode=coordinate_transformation_mode, + cubic_coeff_a=cubic_coeff_a, + exclude_outside=exclude_outside, + extrapolation_value=extrapolation_value, + keep_aspect_ratio_policy=keep_aspect_ratio_policy, + mode=mode, + nearest_mode=nearest_mode, + ) + + def ScatterElements( + self, + data: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + indices: Union[INT32, INT64], + updates: Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axis: int = 0, + reduction: str = "none", + ) -> Union[ + BFLOAT16, + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 ScatterElements(18)](https://onnx.ai/onnx/operators/onnx__ScatterElements.html#scatterelements-18 "Online Documentation") + + + ScatterElements takes three inputs `data`, `updates`, and `indices` of the same + rank r >= 1 and an optional attribute axis that identifies an axis of `data` + (by default, the outer-most axis, that is axis 0). The output of the operation + is produced by creating a copy of the input `data`, and then updating its value + to values specified by `updates` at specific index positions specified by + `indices`. Its output shape is the same as the shape of `data`. + + For each entry in `updates`, the target index in `data` is obtained by combining + the corresponding entry in `indices` with the index of the entry itself: the + index-value for dimension = axis is obtained from the value of the corresponding + entry in `indices` and the index-value for dimension != axis is obtained from the + index of the entry itself. + + `reduction` allows specification of an optional reduction operation, which is applied to all values in `updates` + tensor into `output` at the specified `indices`. + In cases where `reduction` is set to "none", indices should not have duplicate entries: that is, if idx1 != idx2, + then indices[idx1] != indices[idx2]. 
For instance, in a 2-D tensor case, the update
+        corresponding to the [i][j] entry is performed as below:
+        ::
+
+            output[indices[i][j]][j] = updates[i][j] if axis = 0,
+            output[i][indices[i][j]] = updates[i][j] if axis = 1,
+
+
+        When `reduction` is set to some reduction function `f`, the update corresponding to the [i][j] entry is performed as below:
+        ::
+
+            output[indices[i][j]][j] = f(output[indices[i][j]][j], updates[i][j]) if axis = 0,
+            output[i][indices[i][j]] = f(output[i][indices[i][j]], updates[i][j]) if axis = 1,
+
+
+        where the `f` is +/*/max/min as specified.
+
+
+        This operator is the inverse of GatherElements. It is similar to Torch's Scatter operation.
+
+        (Opset 18 change): Adds max/min to the set of allowed reduction ops.
+
+        Example 1:
+        ::
+
+            data = [
+                [0.0, 0.0, 0.0],
+                [0.0, 0.0, 0.0],
+                [0.0, 0.0, 0.0],
+            ]
+            indices = [
+                [1, 0, 2],
+                [0, 2, 1],
+            ]
+            updates = [
+                [1.0, 1.1, 1.2],
+                [2.0, 2.1, 2.2],
+            ]
+            output = [
+                [2.0, 1.1, 0.0],
+                [1.0, 0.0, 2.2],
+                [0.0, 2.1, 1.2],
+            ]
+
+
+        Example 2:
+        ::
+
+            data = [[1.0, 2.0, 3.0, 4.0, 5.0]]
+            indices = [[1, 3]]
+            updates = [[1.1, 2.1]]
+            axis = 1
+            output = [[1.0, 1.1, 3.0, 2.1, 5.0]]
+
+
+
+
+        Args:
+            data: (differentiable) Tensor of rank r >= 1.
+
+            indices: (non-differentiable) Tensor of int32/int64 indices, of rank r >= 1
+                (same rank as input). All index values are expected to be within bounds
+                [-s, s-1] along axis of size s. It is an error if any of the index
+                values are out of bounds.
+
+            updates: (differentiable) Tensor of rank r >= 1 (same rank and shape as
+                indices)
+
+            axis: Which axis to scatter on. Negative value means counting dimensions
+                from the back. Accepted range is [-r, r-1] where r = rank(data).
+
+            reduction: Type of reduction to apply: none (default), add, mul, max, min.
+                'none': no reduction applied. 'add': reduction using the addition
+                operation. 'mul': reduction using the multiplication operation. 'max':
+                reduction using the maximum operation. 'min': reduction using the
+                minimum operation.
+        """
+
+        schema = get_schema("ScatterElements", 18, "")
+        op: Callable[
+            ...,
+            Union[
+                BFLOAT16,
+                BOOL,
+                COMPLEX128,
+                COMPLEX64,
+                DOUBLE,
+                FLOAT,
+                FLOAT16,
+                INT16,
+                INT32,
+                INT64,
+                INT8,
+                STRING,
+                UINT16,
+                UINT32,
+                UINT64,
+                UINT8,
+            ],
+        ] = Op(self, "ScatterElements", schema)
+        return op(
+            *self._prepare_inputs(schema, data, indices, updates),
+            axis=axis,
+            reduction=reduction,
+        )
+
+    def ScatterND(
+        self,
+        data: Union[
+            BFLOAT16,
+            BOOL,
+            COMPLEX128,
+            COMPLEX64,
+            DOUBLE,
+            FLOAT,
+            FLOAT16,
+            INT16,
+            INT32,
+            INT64,
+            INT8,
+            STRING,
+            UINT16,
+            UINT32,
+            UINT64,
+            UINT8,
+        ],
+        indices: INT64,
+        updates: Union[
+            BFLOAT16,
+            BOOL,
+            COMPLEX128,
+            COMPLEX64,
+            DOUBLE,
+            FLOAT,
+            FLOAT16,
+            INT16,
+            INT32,
+            INT64,
+            INT8,
+            STRING,
+            UINT16,
+            UINT32,
+            UINT64,
+            UINT8,
+        ],
+        reduction: str = "none",
+    ) -> Union[
+        BFLOAT16,
+        BOOL,
+        COMPLEX128,
+        COMPLEX64,
+        DOUBLE,
+        FLOAT,
+        FLOAT16,
+        INT16,
+        INT32,
+        INT64,
+        INT8,
+        STRING,
+        UINT16,
+        UINT32,
+        UINT64,
+        UINT8,
+    ]:
+        r"""[🌐 ScatterND(18)](https://onnx.ai/onnx/operators/onnx__ScatterND.html#scatternd-18 "Online Documentation")
+
+
+        ScatterND takes three inputs `data` tensor of rank r >= 1, `indices` tensor of rank q >= 1,
+        and `updates` tensor of rank q + r - indices.shape[-1] - 1. The output of the operation
+        is produced by creating a copy of the input `data`, and then updating its value to values
+        specified by `updates` at specific index positions specified by `indices`.
Its output shape + is the same as the shape of `data`. + + `indices` is an integer tensor. Let k denote indices.shape[-1], the last dimension in the shape of `indices`. + `indices` is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into `data`. + Hence, k can be a value at most the rank of `data`. When k equals rank(data), each update entry specifies an + update to a single element of the tensor. When k is less than rank(data) each update entry specifies an + update to a slice of the tensor. Index values are allowed to be negative, as per the usual + convention for counting backwards from the end, but are expected in the valid range. + + `updates` is treated as a (q-1)-dimensional tensor of replacement-slice-values. Thus, the + first (q-1) dimensions of updates.shape must match the first (q-1) dimensions of indices.shape. + The remaining dimensions of `updates` correspond to the dimensions of the + replacement-slice-values. Each replacement-slice-value is a (r-k) dimensional tensor, + corresponding to the trailing (r-k) dimensions of `data`. Thus, the shape of `updates` + must equal indices.shape[0:q-1] ++ data.shape[k:r-1], where ++ denotes the concatenation + of shapes. + + The `output` is calculated via the following equation: + + output = np.copy(data) + update_indices = indices.shape[:-1] + for idx in np.ndindex(update_indices): + output[indices[idx]] = updates[idx] + + The order of iteration in the above loop is not specified. + In particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2]. + This ensures that the output value does not depend on the iteration order. + + `reduction` allows specification of an optional reduction operation, which is applied to all values in `updates` + tensor into `output` at the specified `indices`. + In cases where `reduction` is set to "none", indices should not have duplicate entries: that is, if idx1 != idx2, + then indices[idx1] != indices[idx2]. This ensures that the output value does not depend on the iteration order. + When `reduction` is set to some reduction function `f`, `output` is calculated as follows: + + output = np.copy(data) + update_indices = indices.shape[:-1] + for idx in np.ndindex(update_indices): + output[indices[idx]] = f(output[indices[idx]], updates[idx]) + + where the `f` is +/*/max/min as specified. + + This operator is the inverse of GatherND. + + (Opset 18 change): Adds max/min to the set of allowed reduction ops. + + Example 1: + :: + + data = [1, 2, 3, 4, 5, 6, 7, 8] + indices = [[4], [3], [1], [7]] + updates = [9, 10, 11, 12] + output = [1, 11, 3, 10, 9, 6, 7, 12] + + + + Example 2: + :: + + data = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], + [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], + [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]], + [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]] + indices = [[0], [2]] + updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]] + output = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], + [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]], + [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]] + + + + + Args: + data: (differentiable) Tensor of rank r >= 1. + + indices: (non-differentiable) Tensor of rank q >= 1. + + updates: (differentiable) Tensor of rank q + r - indices_shape[-1] - 1. 
+
+            reduction: Type of reduction to apply: none (default), add, mul, max, min.
+                'none': no reduction applied. 'add': reduction using the addition
+                operation. 'mul': reduction using the multiplication operation. 'max':
+                reduction using the maximum operation. 'min': reduction using the
+                minimum operation.
+        """
+
+        schema = get_schema("ScatterND", 18, "")
+        op: Callable[
+            ...,
+            Union[
+                BFLOAT16,
+                BOOL,
+                COMPLEX128,
+                COMPLEX64,
+                DOUBLE,
+                FLOAT,
+                FLOAT16,
+                INT16,
+                INT32,
+                INT64,
+                INT8,
+                STRING,
+                UINT16,
+                UINT32,
+                UINT64,
+                UINT8,
+            ],
+        ] = Op(self, "ScatterND", schema)
+        return op(*self._prepare_inputs(schema, data, indices, updates), reduction=reduction)
+
+    def Split(
+        self,
+        input: Union[
+            BFLOAT16,
+            BOOL,
+            COMPLEX128,
+            COMPLEX64,
+            DOUBLE,
+            FLOAT,
+            FLOAT16,
+            INT16,
+            INT32,
+            INT64,
+            INT8,
+            STRING,
+            UINT16,
+            UINT32,
+            UINT64,
+            UINT8,
+        ],
+        split: Optional[INT64] = None,
+        axis: int = 0,
+        num_outputs: Optional[int] = None,
+    ) -> Union[
+        BFLOAT16,
+        BOOL,
+        COMPLEX128,
+        COMPLEX64,
+        DOUBLE,
+        FLOAT,
+        FLOAT16,
+        INT16,
+        INT32,
+        INT64,
+        INT8,
+        STRING,
+        UINT16,
+        UINT32,
+        UINT64,
+        UINT8,
+    ]:
+        r"""[🌐 Split(18)](https://onnx.ai/onnx/operators/onnx__Split.html#split-18 "Online Documentation")
+
+        Split a tensor into a list of tensors, along the specified 'axis'.
+        Either input 'split' or the attribute 'num_outputs' should be specified, but not both.
+        If the attribute 'num_outputs' is specified, then the tensor is split into equal sized parts.
+        If the tensor is not evenly splittable into `num_outputs`, the last chunk will be smaller.
+        If the input 'split' is specified, it indicates the sizes of each output in the split.
+
+
+        Args:
+            input: (differentiable) The tensor to split
+
+            split: (optional, non-differentiable) Optional length of each output. Values
+                should be >= 0. The sum of the values must be equal to the dim value at
+                'axis' specified.
+
+            axis: Which axis to split on. A negative value means counting dimensions
+                from the back. Accepted range is [-r, r-1] where r = rank(input).
+
+            num_outputs: Number of outputs to split parts of the tensor into. If the
+                tensor is not evenly splittable the last chunk will be smaller.
+        """
+
+        schema = get_schema("Split", 18, "")
+        op: Callable[
+            ...,
+            Union[
+                BFLOAT16,
+                BOOL,
+                COMPLEX128,
+                COMPLEX64,
+                DOUBLE,
+                FLOAT,
+                FLOAT16,
+                INT16,
+                INT32,
+                INT64,
+                INT8,
+                STRING,
+                UINT16,
+                UINT32,
+                UINT64,
+                UINT8,
+            ],
+        ] = Op(self, "Split", schema)
+        return op(
+            *self._prepare_inputs(schema, input, split), axis=axis, num_outputs=num_outputs
+        )
diff --git a/onnxscript/onnx_opset/_impl/opset2.py b/onnxscript/onnx_opset/_impl/opset2.py
new file mode 100644
index 0000000000..87d9a35b11
--- /dev/null
+++ b/onnxscript/onnx_opset/_impl/opset2.py
@@ -0,0 +1,251 @@
+# --------------------------------------------------------------------------
+# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️
+# ⚙️ Generated by 'python -m opgen'
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+# -------------------------------------------------------------------------- +# flake8: noqa +# mypy: disable-error-code=override +# pylint: disable=W0221,W0222,W0237,W0246,R0901 +# -------------------------------------------------------------------------- + +from typing import Callable, Optional, Sequence, Union + +from onnx.defs import get_schema + +from onnxscript.onnx_opset._impl.opset1 import Opset1 +from onnxscript.onnx_types import ( + BOOL, + COMPLEX64, + COMPLEX128, + DOUBLE, + FLOAT, + FLOAT16, + INT8, + INT16, + INT32, + INT64, + STRING, + UINT8, + UINT16, + UINT32, + UINT64, +) +from onnxscript.values import Op, Opset + + +class Opset2(Opset1): + def __new__(cls): + return Opset.__new__(cls, "", 2) + + def __init__(self): + super().__init__() + + def GlobalLpPool( + self, X: Union[DOUBLE, FLOAT, FLOAT16], p: int = 2 + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 GlobalLpPool(2)](https://onnx.ai/onnx/operators/onnx__GlobalLpPool.html#globallppool-2 "Online Documentation") + + + GlobalLpPool consumes an input tensor X and applies lp pool pooling across + the values in the same channel. This is equivalent to LpPool with kernel size + equal to the spatial dimension of input tensor. + + Args: + X: (differentiable) Input data tensor from the previous operator; dimensions + for image case are (N x C x H x W), where N is the batch size, C is the + number of channels, and H and W are the height and the width of the + data. For non image case, the dimensions are in the form of (N x C x D1 + x D2 ... Dn), where N is the batch size. + + p: p value of the Lp norm used to pool over the input data. + """ + + schema = get_schema("GlobalLpPool", 2, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "GlobalLpPool", schema) + return op(*self._prepare_inputs(schema, X), p=p) + + def LpPool( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + auto_pad: str = "NOTSET", + kernel_shape: Optional[Sequence[int]] = None, + p: int = 2, + pads: Optional[Sequence[int]] = None, + strides: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 LpPool(2)](https://onnx.ai/onnx/operators/onnx__LpPool.html#lppool-2 "Online Documentation") + + + LpPool consumes an input tensor X and applies Lp pooling across + the tensor according to kernel sizes, stride sizes, and pad lengths. + Lp pooling consisting of computing the Lp norm on all values of a subset + of the input tensor according to the kernel size and downsampling the + data into the output tensor Y for further processing. + + Args: + X: Input data tensor from the previous operator; dimensions for image case + are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. For non + image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), + where N is the batch size. + + auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. + Where default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial + size match the input.In case of odd number add the extra padding at the + end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no + padding. + + kernel_shape: The size of the kernel along each axis. + + p: p value of the Lp norm used to pool over the input data. + + pads: Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. 
The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. `pads` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis `i` and xi_end, the number of pixels + added at the end of axis `i`. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. + + strides: Stride along each spatial axis. + """ + + schema = get_schema("LpPool", 2, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "LpPool", schema) + return op( + *self._prepare_inputs(schema, X), + auto_pad=auto_pad, + kernel_shape=kernel_shape, + p=p, + pads=pads, + strides=strides, + ) + + def Pad( + self, + data: Union[DOUBLE, FLOAT, FLOAT16], + mode: str = "constant", + pads: Optional[Sequence[int]] = None, + value: float = 0.0, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Pad(2)](https://onnx.ai/onnx/operators/onnx__Pad.html#pad-2 "Online Documentation") + + + Given `data` tensor, pads, mode, and value. + Example: + Insert 0 pads to the beginning of the second dimension. + data = [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] + pads = [0, 2, 0, 0] + output = [ + [ + [0.0, 0.0, 1.0, 1.2], + [0.0, 0.0, 2.3, 3.4], + [0.0, 0.0, 4.5, 5.7], + ], + ] + + + Args: + data: Input tensor. + + mode: Three modes: constant(default), reflect, edge + + pads: List of integers indicating the number of padding elements to add or + remove (if negative) at the beginning and end of each axis. For 2D it is + the number of pixels. `pads` rank should be double of the input's rank. + `pads` format should be as follow [x1_begin, x2_begin...x1_end, + x2_end,...], where xi_begin the number of pixels added at the beginning + of axis `i` and xi_end, the number of pixels added at the end of axis + `i`. + + value: One float, indicates the value to be filled. + """ + + schema = get_schema("Pad", 2, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Pad", schema) + return op(*self._prepare_inputs(schema, data), mode=mode, pads=pads, value=value) + + def Split( + self, + input: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axis: int = 0, + split: Optional[Sequence[int]] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Split(2)](https://onnx.ai/onnx/operators/onnx__Split.html#split-2 "Online Documentation") + + Split a tensor into a list of tensors, along the specified + 'axis'. Lengths of the parts can be specified using argument 'split'. + Otherwise, the tensor is split to equal sized parts. + + + Args: + input: The tensor to split + + axis: Which axis to split on. 
+ + split: length of each output + """ + + schema = get_schema("Split", 2, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Split", schema) + return op(*self._prepare_inputs(schema, input), axis=axis, split=split) diff --git a/onnxscript/onnx_opset/_impl/opset3.py b/onnxscript/onnx_opset/_impl/opset3.py new file mode 100644 index 0000000000..a21e794403 --- /dev/null +++ b/onnxscript/onnx_opset/_impl/opset3.py @@ -0,0 +1,195 @@ +# -------------------------------------------------------------------------- +# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ +# ⚙️ Generated by 'python -m opgen' +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +# flake8: noqa +# mypy: disable-error-code=override +# pylint: disable=W0221,W0222,W0237,W0246,R0901 +# -------------------------------------------------------------------------- + +from typing import Callable, Optional, Sequence, Tuple, Union + +from onnx.defs import get_schema + +from onnxscript.onnx_opset._impl.opset2 import Opset2 +from onnxscript.onnx_types import DOUBLE, FLOAT, FLOAT16, INT32 +from onnxscript.values import Op, Opset + + +class Opset3(Opset2): + def __new__(cls): + return Opset.__new__(cls, "", 3) + + def __init__(self): + super().__init__() + + def GRU( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + W: Union[DOUBLE, FLOAT, FLOAT16], + R: Union[DOUBLE, FLOAT, FLOAT16], + B: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + sequence_lens: Optional[INT32] = None, + initial_h: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + activation_alpha: Optional[Sequence[float]] = None, + activation_beta: Optional[Sequence[float]] = None, + activations: Optional[Sequence[str]] = None, + clip: Optional[float] = None, + direction: str = "forward", + hidden_size: Optional[int] = None, + linear_before_reset: int = 0, + output_sequence: int = 0, + ) -> Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]]: + r"""[🌐 GRU(3)](https://onnx.ai/onnx/operators/onnx__GRU.html#gru-3 "Online Documentation") + + + Computes an one-layer GRU. This operator is usually supported via some custom + implementation such as CuDNN. 
+ + Notations: + + `X` - input tensor + + `z` - update gate + + `r` - reset gate + + `h` - hidden gate + + `t` - time step (t-1 means previous time step) + + `W[zrh]` - W parameter weight matrix for update, reset, and hidden gates + + `R[zrh]` - R recurrence weight matrix for update, reset, and hidden gates + + `Wb[zrh]` - W bias vectors for update, reset, and hidden gates + + `Rb[zrh]` - R bias vectors for update, reset, and hidden gates + + `WB[zrh]` - W parameter weight matrix for backward update, reset, and hidden gates + + `RB[zrh]` - R recurrence weight matrix for backward update, reset, and hidden gates + + `WBb[zrh]` - W bias vectors for backward update, reset, and hidden gates + + `RBb[zrh]` - R bias vectors for backward update, reset, and hidden gates + + `H` - Hidden state + + `num_directions` - 2 if direction == bidirectional else 1 + + Activation functions: + + Relu(x) - max(0, x) + + Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) + + Sigmoid(x) - 1/(1 + e^{-x}) + + (NOTE: Below are optional) + + Affine(x) - alpha*x + beta + + LeakyRelu(x) - x if x >= 0 else alpha * x + + ThresholdedRelu(x) - x if x >= alpha else 0 + + ScaledTanh(x) - alpha*Tanh(beta*x) + + HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) + + Elu(x) - x if x >= 0 else alpha*(e^x - 1) + + Softsign(x) - x/(1 + |x|) + + Softplus(x) - log(1 + e^x) + + Equations (Default: f=Sigmoid, g=Tanh): + + - zt = f(Xt*(Wz^T) + Ht-1*Rz + Wbz + Rbz) + + - rt = f(Xt*(Wr^T) + Ht-1*Rr + Wbr + Rbr) + + - ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*Rh + Rbh + Wbh) # default, when linear_before_reset = 0 + + - ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*Rh + Rbh)) + Wbh) # when linear_before_reset != 0 + + - Ht = (1 - zt) (.) ht + zt (.) Ht-1 + + + Args: + X: The input sequences packed (and potentially padded) into one 3-D tensor + with the shape of `[seq_length, batch_size, input_size]`. + + W: The weight tensor for the gates. Concatenation of `W[zrh]` and `WB[zrh]` + (if bidirectional) along dimension 0. This tensor has shape + `[num_directions, 3*hidden_size, input_size]`. + + R: The recurrence weight tensor. Concatenation of `R[zrh]` and `RB[zrh]` (if + bidirectional) along dimension 0. This tensor has shape + `[num_directions, 3*hidden_size, hidden_size]`. + + B: (optional) The bias tensor for the gates. Concatenation of `[Wb[zrh], + Rb[zrh]]` and `[WBb[zrh], RBb[zrh]]` (if bidirectional) along dimension + 0. This tensor has shape `[num_directions, 6*hidden_size]`. Optional: If + not specified - assumed to be 0 + + sequence_lens: (optional) Optional tensor specifying lengths of the + sequences in a batch. If not specified - assumed all sequences in the + batch to have length `seq_length`. It has shape `[batch_size]`. + + initial_h: (optional) Optional initial value of the hidden. If not specified + - assumed to be 0. It has shape `[num_directions, batch_size, + hidden_size]`. + + activation_alpha: Optional scaling values used by some activation functions. + The values are consumed in the order of activation functions, for + example (f, g, h) in LSTM. Default values are the same as of + corresponding ONNX operators. For example with LeakyRelu, the default + alpha is 0.01. + + activation_beta: Optional scaling values used by some activation functions. + The values are consumed in the order of activation functions, for + example (f, g, h) in LSTM. Default values are the same as of + corresponding ONNX operators. + + activations: A list of 2 (or 4 if bidirectional) activation functions for + update, reset, and hidden gates.
The activation functions must be one of + the activation functions specified above. Optional: See the equations + for default if not specified. + + clip: Cell clip threshold. Clipping bounds the elements of a tensor in the + range of [-threshold, +threshold] and is applied to the input of + activations. No clip if not specified. + + direction: Specify if the RNN is forward, reverse, or bidirectional. Must be + one of forward (default), reverse, or bidirectional. + + hidden_size: Number of neurons in the hidden layer + + linear_before_reset: When computing the output of the hidden gate, apply the + linear transformation before multiplying by the output of the reset + gate. + + output_sequence: The sequence output for the hidden is optional if 0. + Default 0. + """ + + schema = get_schema("GRU", 3, "") + op: Callable[ + ..., Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]] + ] = Op(self, "GRU", schema) + return op( + *self._prepare_inputs(schema, X, W, R, B, sequence_lens, initial_h), + activation_alpha=activation_alpha, + activation_beta=activation_beta, + activations=activations, + clip=clip, + direction=direction, + hidden_size=hidden_size, + linear_before_reset=linear_before_reset, + output_sequence=output_sequence, + ) diff --git a/onnxscript/onnx_opset/_impl/opset4.py b/onnxscript/onnx_opset/_impl/opset4.py new file mode 100644 index 0000000000..52b4fca16d --- /dev/null +++ b/onnxscript/onnx_opset/_impl/opset4.py @@ -0,0 +1,113 @@ +# -------------------------------------------------------------------------- +# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ +# ⚙️ Generated by 'python -m opgen' +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
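The GRU(3) equations above can be checked with a single-direction, single-step NumPy sketch. Note the opset-3 text writes `Ht-1*Rz` where the GRU(7) docstring later in this diff writes `Ht-1*(Rz^T)`; the transposed form is what makes the documented shapes of `W` ([3*hidden_size, input_size] per direction) and `R` ([3*hidden_size, hidden_size]) line up, so the sketch uses it. The helper names and the pre-split biases are illustrative assumptions:

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def gru_step(Xt, H_prev, W, R, Wb, Rb):
    # Single direction; W: (3*hidden, input), R: (3*hidden, hidden),
    # Wb/Rb: (3*hidden,) -- i.e. B pre-split into its two documented halves.
    Wz, Wr, Wh = np.split(W, 3)
    Rz, Rr, Rh = np.split(R, 3)
    Wbz, Wbr, Wbh = np.split(Wb, 3)
    Rbz, Rbr, Rbh = np.split(Rb, 3)
    zt = sigmoid(Xt @ Wz.T + H_prev @ Rz.T + Wbz + Rbz)
    rt = sigmoid(Xt @ Wr.T + H_prev @ Rr.T + Wbr + Rbr)
    # default path, linear_before_reset = 0
    ht = np.tanh(Xt @ Wh.T + (rt * H_prev) @ Rh.T + Rbh + Wbh)
    return (1.0 - zt) * ht + zt * H_prev

rng = np.random.default_rng(0)
batch, input_size, hidden = 4, 5, 3
H = gru_step(rng.standard_normal((batch, input_size)), np.zeros((batch, hidden)),
             rng.standard_normal((3 * hidden, input_size)),
             rng.standard_normal((3 * hidden, hidden)),
             np.zeros(3 * hidden), np.zeros(3 * hidden))
print(H.shape)  # (4, 3)
```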
+# -------------------------------------------------------------------------- +# flake8: noqa +# mypy: disable-error-code=override +# pylint: disable=W0221,W0222,W0237,W0246,R0901 +# -------------------------------------------------------------------------- + +from typing import Callable, Optional, Union + +from onnx.defs import get_schema + +from onnxscript.onnx_opset._impl.opset3 import Opset3 +from onnxscript.onnx_types import ( + BOOL, + COMPLEX64, + COMPLEX128, + DOUBLE, + FLOAT, + FLOAT16, + INT8, + INT16, + INT32, + INT64, + STRING, + UINT8, + UINT16, + UINT32, + UINT64, +) +from onnxscript.values import Op, Opset + + +class Opset4(Opset3): + def __new__(cls): + return Opset.__new__(cls, "", 4) + + def __init__(self): + super().__init__() + + def Concat( + self, + *inputs: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axis: Optional[int] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Concat(4)](https://onnx.ai/onnx/operators/onnx__Concat.html#concat-4 "Online Documentation") + + Concatenate a list of tensors into a single tensor + + Args: + inputs: (variadic) List of tensors for concatenation + + axis: Which axis to concat on + """ + + schema = get_schema("Concat", 4, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Concat", schema) + return op(*self._prepare_inputs(schema, *inputs), axis=axis) diff --git a/onnxscript/onnx_opset/_impl/opset5.py b/onnxscript/onnx_opset/_impl/opset5.py new file mode 100644 index 0000000000..eee712934c --- /dev/null +++ b/onnxscript/onnx_opset/_impl/opset5.py @@ -0,0 +1,120 @@ +# -------------------------------------------------------------------------- +# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ +# ⚙️ Generated by 'python -m opgen' +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+# -------------------------------------------------------------------------- +# flake8: noqa +# mypy: disable-error-code=override +# pylint: disable=W0221,W0222,W0237,W0246,R0901 +# -------------------------------------------------------------------------- + +from typing import Callable, Union + +from onnx.defs import get_schema + +from onnxscript.onnx_opset._impl.opset4 import Opset4 +from onnxscript.onnx_types import ( + BOOL, + COMPLEX64, + COMPLEX128, + DOUBLE, + FLOAT, + FLOAT16, + INT8, + INT16, + INT32, + INT64, + STRING, + UINT8, + UINT16, + UINT32, + UINT64, +) +from onnxscript.values import Op, Opset + + +class Opset5(Opset4): + def __new__(cls): + return Opset.__new__(cls, "", 5) + + def __init__(self): + super().__init__() + + def Reshape( + self, + data: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + shape: INT64, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Reshape(5)](https://onnx.ai/onnx/operators/onnx__Reshape.html#reshape-5 "Online Documentation") + + + Reshape the input tensor similar to numpy.reshape. + First input is the data tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor. + At most one dimension of the new shape can be -1. In this case, the value is + inferred from the size of the tensor and the remaining dimensions. A dimension + could also be 0, in which case the actual dimension value is unchanged (i.e. taken + from the input tensor). Shape (second input) could be an empty shape, which means converting to a scalar. + The input tensor's shape and the output tensor's shape are required to have the same number of elements. + + Args: + data: An input tensor. + + shape: Specified shape for output. + """ + + schema = get_schema("Reshape", 5, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Reshape", schema) + return op(*self._prepare_inputs(schema, data, shape)) diff --git a/onnxscript/onnx_opset/_impl/opset6.py b/onnxscript/onnx_opset/_impl/opset6.py new file mode 100644 index 0000000000..e8f7b579bd --- /dev/null +++ b/onnxscript/onnx_opset/_impl/opset6.py @@ -0,0 +1,988 @@ +# -------------------------------------------------------------------------- +# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ +# ⚙️ Generated by 'python -m opgen' +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
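Reshape(5)'s special shape values documented above (0 copies the input dimension, -1 is inferred at most once) are easy to model. A sketch with the hypothetical helper `resolve_reshape`:

```python
import numpy as np

def resolve_reshape(input_shape, shape):
    # 0 copies the corresponding input dimension; at most one -1 is inferred.
    out = [input_shape[i] if d == 0 else d for i, d in enumerate(shape)]
    if -1 in out:
        known = int(np.prod([d for d in out if d != -1]))
        out[out.index(-1)] = int(np.prod(input_shape)) // known
    assert int(np.prod(out)) == int(np.prod(input_shape))  # element counts must match
    return out

x = np.arange(24).reshape(2, 3, 4)
print(resolve_reshape(x.shape, (0, -1)))                   # [2, 12]
print(x.reshape(resolve_reshape(x.shape, (0, -1))).shape)  # (2, 12)
```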
+# -------------------------------------------------------------------------- +# flake8: noqa +# mypy: disable-error-code=override +# pylint: disable=W0221,W0222,W0237,W0246,R0901 +# -------------------------------------------------------------------------- + +from typing import Callable, Optional, Tuple, Union + +from onnx.defs import get_schema + +from onnxscript.onnx_opset._impl.opset5 import Opset5 +from onnxscript.onnx_types import ( + BOOL, + COMPLEX64, + COMPLEX128, + DOUBLE, + FLOAT, + FLOAT16, + INT8, + INT16, + INT32, + INT64, + STRING, + UINT8, + UINT16, + UINT32, + UINT64, +) +from onnxscript.values import Op, Opset + + +class Opset6(Opset5): + def __new__(cls): + return Opset.__new__(cls, "", 6) + + def __init__(self): + super().__init__() + + def Abs( + self, + X: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + ) -> Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ]: + r"""[🌐 Abs(6)](https://onnx.ai/onnx/operators/onnx__Abs.html#abs-6 "Online Documentation") + + + Absolute takes one input data (Tensor) and produces one output data + (Tensor) where the absolute is, y = abs(x), is applied to + the tensor elementwise. + + + Args: + X: Input tensor + """ + + schema = get_schema("Abs", 6, "") + op: Callable[ + ..., + Union[ + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Abs", schema) + return op(*self._prepare_inputs(schema, X)) + + def Add( + self, + A: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + B: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axis: Optional[int] = None, + broadcast: int = 0, + ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 Add(6)](https://onnx.ai/onnx/operators/onnx__Add.html#add-6 "Online Documentation") + + + Performs element-wise binary addition (with limited broadcast support). + + If necessary the right-hand-side argument will be broadcasted to match the + shape of left-hand-side argument. When broadcasting is specified, the second + tensor can either be of element size 1 (including a scalar tensor and any + tensor with rank equal to or smaller than the first tensor), or having its + shape as a contiguous subset of the first tensor's shape. The starting of the + mutually equal shape is specified by the argument "axis", and if it is not set, + suffix matching is assumed. 1-dim expansion doesn't work yet. + + For example, the following tensor shapes are supported (with broadcast=1): + + shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor + shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor + shape(A) = (2, 3, 4, 5), shape(B) = (5,) + shape(A) = (2, 3, 4, 5), shape(B) = (4, 5) + shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1 + shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0 + + Attribute `broadcast=1` needs to be passed to enable broadcasting. + + + Args: + A: First operand, should share the type with the second operand. + + B: Second operand. With broadcasting can be of smaller size than A. If + broadcasting is disabled it should be of the same size. + + axis: If set, defines the broadcast dimensions. See doc for details. 
+ + broadcast: Pass 1 to enable broadcasting + """ + + schema = get_schema("Add", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op( + self, "Add", schema + ) + return op(*self._prepare_inputs(schema, A, B), axis=axis, broadcast=broadcast) + + def BatchNormalization( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + scale: Union[DOUBLE, FLOAT, FLOAT16], + B: Union[DOUBLE, FLOAT, FLOAT16], + mean: Union[DOUBLE, FLOAT, FLOAT16], + var: Union[DOUBLE, FLOAT, FLOAT16], + epsilon: float = 9.999999747378752e-06, + is_test: int = 0, + momentum: float = 0.8999999761581421, + spatial: int = 1, + ) -> Tuple[ + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + ]: + r"""[🌐 BatchNormalization(6)](https://onnx.ai/onnx/operators/onnx__BatchNormalization.html#batchnormalization-6 "Online Documentation") + + + Carries out batch normalization as described in the paper + https://arxiv.org/abs/1502.03167. Depending on the mode it is being run in, + there are multiple cases for the number of outputs, which we list below: + + Output case #1: Y, mean, var, saved_mean, saved_var (training mode) + Output case #2: Y (test mode) + + + Args: + X: Input data tensor from the previous operator; dimensions for image case + are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. For non + image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), + where N is the batch size. + + scale: The scale as a 1-dimensional tensor of size C to be applied to the + output. + + B: The bias as a 1-dimensional tensor of size C to be applied to the output. + + mean: The running mean (training) or the estimated mean (testing) as a + 1-dimensional tensor of size C. + + var: The running variance (training) or the estimated variance (testing) as + a 1-dimensional tensor of size C. + + epsilon: The epsilon value to use to avoid division by zero, default is + 1e-5f. + + is_test: If set to nonzero, run spatial batch normalization in test mode, + default is 0. + + momentum: Factor used in computing the running mean and variance, e.g., + running_mean = running_mean * momentum + mean * (1 - momentum), default + is 0.9f. + + spatial: If true, compute the mean and variance across all spatial elements. + If false, compute the mean and variance per feature. Default is 1. + """ + + schema = get_schema("BatchNormalization", 6, "") + op: Callable[ + ..., + Tuple[ + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + ], + ] = Op(self, "BatchNormalization", schema) + return op( + *self._prepare_inputs(schema, X, scale, B, mean, var), + epsilon=epsilon, + is_test=is_test, + momentum=momentum, + spatial=spatial, + ) + + def Cast( + self, + input: Union[ + BOOL, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + to: Optional[int] = None, + ) -> Union[ + BOOL, DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ]: + r"""[🌐 Cast(6)](https://onnx.ai/onnx/operators/onnx__Cast.html#cast-6 "Online Documentation") + + + The operator casts the elements of a given input tensor to a data type + specified by the 'to' argument and returns an output tensor of the same size in + the converted type.
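The opset-6 binary ops (Add above, Div/Mul/Sub below) share the limited-broadcast rule: with `broadcast=1`, B is matched against a contiguous subset of A's shape starting at `axis`, with suffix matching when `axis` is unset. A NumPy sketch of the alignment this implies (helper name illustrative):

```python
import numpy as np

def align_rhs_opset6(A, B, axis=None):
    # B lines up with a contiguous subset of A's shape starting at `axis`;
    # suffix matching applies when `axis` is unset.
    if axis is None:
        axis = A.ndim - B.ndim
    return B.reshape((1,) * axis + B.shape + (1,) * (A.ndim - axis - B.ndim))

A = np.zeros((2, 3, 4, 5))
print((A + align_rhs_opset6(A, np.ones((3, 4)), axis=1)).shape)  # (2, 3, 4, 5)
print((A + align_rhs_opset6(A, np.ones((4, 5)))).shape)          # suffix match
```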
The 'to' argument must be one of the data types specified + in the 'DataType' enum field in the TensorProto message. + NOTE: Casting to and from strings is not supported yet. + + + Args: + input: Input tensor to be cast. + + to: The data type to which the elements of the input tensor are cast. + Strictly must be one of the types from DataType enum in TensorProto + """ + + schema = get_schema("Cast", 6, "") + op: Callable[ + ..., + Union[ + BOOL, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Cast", schema) + return op(*self._prepare_inputs(schema, input), to=to) + + def Ceil(self, X: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Ceil(6)](https://onnx.ai/onnx/operators/onnx__Ceil.html#ceil-6 "Online Documentation") + + + Ceil takes one input data (Tensor) and produces one output data + (Tensor) where the ceil is, y = ceil(x), is applied to + the tensor elementwise. + + + Args: + X: Input tensor + """ + + schema = get_schema("Ceil", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Ceil", schema) + return op(*self._prepare_inputs(schema, X)) + + def Clip( + self, + input: Union[DOUBLE, FLOAT, FLOAT16], + max: float = 3.4028234663852886e38, + min: float = -3.4028234663852886e38, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Clip(6)](https://onnx.ai/onnx/operators/onnx__Clip.html#clip-6 "Online Documentation") + + + Clip operator limits the given input within an interval. The interval is + specified with arguments 'min' and 'max'. They default to + numeric_limits::lowest() and numeric_limits::max() respectively. + + + Args: + input: Input tensor whose elements to be clipped + + max: Maximum value, above which element is replaced by max + + min: Minimum value, under which element is replaced by min + """ + + schema = get_schema("Clip", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Clip", schema) + return op(*self._prepare_inputs(schema, input), max=max, min=min) + + def Div( + self, + A: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + B: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axis: Optional[int] = None, + broadcast: int = 0, + ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 Div(6)](https://onnx.ai/onnx/operators/onnx__Div.html#div-6 "Online Documentation") + + + Performs element-wise binary division (with limited broadcast support). + + If necessary the right-hand-side argument will be broadcasted to match the + shape of left-hand-side argument. When broadcasting is specified, the second + tensor can either be of element size 1 (including a scalar tensor and any + tensor with rank equal to or smaller than the first tensor), or having its + shape as a contiguous subset of the first tensor's shape. The starting of the + mutually equal shape is specified by the argument "axis", and if it is not set, + suffix matching is assumed. 1-dim expansion doesn't work yet. + + For example, the following tensor shapes are supported (with broadcast=1): + + shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor + shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor + shape(A) = (2, 3, 4, 5), shape(B) = (5,) + shape(A) = (2, 3, 4, 5), shape(B) = (4, 5) + shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1 + shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0 + + Attribute `broadcast=1` needs to be passed to enable broadcasting. 
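Clip(6), documented above, is effectively `numpy.clip` with the float32 extremes as defaults; a tiny sketch (the parameters shadow Python builtins only to mirror the ONNX attribute names):

```python
import numpy as np

FLT_MAX = 3.4028234663852886e38  # matches the defaults in the signature above

def clip6_reference(x, min=-FLT_MAX, max=FLT_MAX):
    return np.clip(x, min, max)

print(clip6_reference(np.array([-2.0, 0.5, 9.0]), min=0.0, max=1.0))  # [0.  0.5 1. ]
```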
+ + + Args: + A: First operand, should share the type with the second operand. + + B: Second operand. With broadcasting can be of smaller size than A. If + broadcasting is disabled it should be of the same size. + + axis: If set, defines the broadcast dimensions. See doc for details. + + broadcast: Pass 1 to enable broadcasting + """ + + schema = get_schema("Div", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op( + self, "Div", schema + ) + return op(*self._prepare_inputs(schema, A, B), axis=axis, broadcast=broadcast) + + def Dropout( + self, data: Union[DOUBLE, FLOAT, FLOAT16], is_test: int = 0, ratio: float = 0.5 + ) -> Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]]: + r"""[🌐 Dropout(6)](https://onnx.ai/onnx/operators/onnx__Dropout.html#dropout-6 "Online Documentation") + + + Dropout takes one input data (Tensor) and produces two Tensor outputs, + output (Tensor) and mask (Tensor). Depending on whether it is in + test mode or not, the output Y will either be a random dropout, or a simple + copy of the input. Note that our implementation of Dropout does scaling in + the training phase, so during testing nothing needs to be done. + + + Args: + data: The input data as Tensor. + + is_test: (int, default 0) if nonzero, run dropout in test mode where the + output is simply Y = X. + + ratio: (float, default 0.5) the ratio of random dropout + """ + + schema = get_schema("Dropout", 6, "") + op: Callable[ + ..., Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]] + ] = Op(self, "Dropout", schema) + return op(*self._prepare_inputs(schema, data), is_test=is_test, ratio=ratio) + + def Elu( + self, X: Union[DOUBLE, FLOAT, FLOAT16], alpha: float = 1.0 + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Elu(6)](https://onnx.ai/onnx/operators/onnx__Elu.html#elu-6 "Online Documentation") + + + Elu takes one input data (Tensor) and produces one output data + (Tensor) where the function `f(x) = alpha * (exp(x) - 1.) for x < + 0`, `f(x) = x for x >= 0`., is applied to the tensor elementwise. + + + + Args: + X: (differentiable) 1D input tensor + + alpha: Coefficient of ELU. + """ + + schema = get_schema("Elu", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Elu", schema) + return op(*self._prepare_inputs(schema, X), alpha=alpha) + + def Exp(self, input: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Exp(6)](https://onnx.ai/onnx/operators/onnx__Exp.html#exp-6 "Online Documentation") + + + Calculates the exponential of the given input tensor, element-wise. + + + Args: + input: Input tensor + """ + + schema = get_schema("Exp", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Exp", schema) + return op(*self._prepare_inputs(schema, input)) + + def Floor(self, X: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Floor(6)](https://onnx.ai/onnx/operators/onnx__Floor.html#floor-6 "Online Documentation") + + + Floor takes one input data (Tensor) and produces one output data + (Tensor) where the floor is, y = floor(x), is applied to + the tensor elementwise. 
+ + + Args: + X: Input tensor + """ + + schema = get_schema("Floor", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Floor", schema) + return op(*self._prepare_inputs(schema, X)) + + def Gemm( + self, + A: Union[DOUBLE, FLOAT, FLOAT16], + B: Union[DOUBLE, FLOAT, FLOAT16], + C: Union[DOUBLE, FLOAT, FLOAT16], + alpha: float = 1.0, + beta: float = 1.0, + broadcast: int = 0, + transA: int = 0, + transB: int = 0, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Gemm(6)](https://onnx.ai/onnx/operators/onnx__Gemm.html#gemm-6 "Online Documentation") + + General Matrix multiplication: + https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3 + Compute Y = alpha * A * B + beta * C, where input tensor A has + dimension (M X K), input tensor B has dimension (K X N), input tensor C and + output tensor Y have dimension (M X N). + If attribute broadcast is non-zero, input tensor C will be broadcasted to match + the dimension requirement. A will be transposed before doing the computation + if attribute transA is non-zero, same for B and transB. + + + Args: + A: Input tensor A + + B: Input tensor B + + C: Input tensor C + + alpha: Scalar multiplier for the product of input tensors A * B, the default + value is 1.0. + + beta: Scalar multiplier for input tensor C, the default value is 1.0. + + broadcast: Whether C should be broadcasted + + transA: Whether A should be transposed + + transB: Whether B should be transposed + """ + + schema = get_schema("Gemm", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Gemm", schema) + return op( + *self._prepare_inputs(schema, A, B, C), + alpha=alpha, + beta=beta, + broadcast=broadcast, + transA=transA, + transB=transB, + ) + + def HardSigmoid( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + alpha: float = 0.20000000298023224, + beta: float = 0.5, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 HardSigmoid(6)](https://onnx.ai/onnx/operators/onnx__HardSigmoid.html#hardsigmoid-6 "Online Documentation") + + + HardSigmoid takes one input data (Tensor) and produces one output data + (Tensor) where the HardSigmoid function, y = max(0, min(1, alpha * x + beta)), + is applied to the tensor elementwise. + + + Args: + X: (differentiable) Input tensor + + alpha: Value of alpha. + + beta: Value of beta. + """ + + schema = get_schema("HardSigmoid", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "HardSigmoid", schema) + return op(*self._prepare_inputs(schema, X), alpha=alpha, beta=beta) + + def InstanceNormalization( + self, + input: Union[DOUBLE, FLOAT, FLOAT16], + scale: Union[DOUBLE, FLOAT, FLOAT16], + B: Union[DOUBLE, FLOAT, FLOAT16], + epsilon: float = 9.999999747378752e-06, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 InstanceNormalization(6)](https://onnx.ai/onnx/operators/onnx__InstanceNormalization.html#instancenormalization-6 "Online Documentation") + + + Carries out instance normalization as described in the paper + https://arxiv.org/abs/1607.08022. + + y = scale * (x - mean) / sqrt(variance + epsilon) + B, + where mean and variance are computed per instance per channel. + + + + Args: + input: (differentiable) Input data tensor from the previous operator; + dimensions for image case are (N x C x H x W), where N is the batch + size, C is the number of channels, and H and W are the height and the + width of the data. For non image case, the dimensions are in the form of + (N x C x D1 x D2 ... Dn), where N is the batch size. + + scale: (differentiable) The input 1-dimensional scale tensor of size C. 
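Gemm(6) above reduces to Y = alpha * A * B + beta * C with optional transposes. A NumPy sketch; broadcasting of C is left to NumPy, which corresponds to the `broadcast=1` case:

```python
import numpy as np

def gemm6_reference(A, B, C, alpha=1.0, beta=1.0, transA=0, transB=0):
    A_prime = A.T if transA else A   # transpose before the computation, per the docs
    B_prime = B.T if transB else B
    return alpha * (A_prime @ B_prime) + beta * C

A, B, C = np.ones((3, 2)), np.ones((3, 4)), np.zeros((2, 4))
print(gemm6_reference(A, B, C, transA=1).shape)  # (2, 4)
```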
+ + B: (differentiable) The input 1-dimensional bias tensor of size C. + + epsilon: The epsilon value to use to avoid division by zero. + """ + + schema = get_schema("InstanceNormalization", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op( + self, "InstanceNormalization", schema + ) + return op(*self._prepare_inputs(schema, input, scale, B), epsilon=epsilon) + + def LeakyRelu( + self, X: Union[DOUBLE, FLOAT, FLOAT16], alpha: float = 0.009999999776482582 + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 LeakyRelu(6)](https://onnx.ai/onnx/operators/onnx__LeakyRelu.html#leakyrelu-6 "Online Documentation") + + + LeakyRelu takes input data (Tensor) and an argument alpha, and produces one + output data (Tensor) where the function `f(x) = alpha * x for x < 0`, + `f(x) = x for x >= 0`, is applied to the data tensor elementwise. + + + Args: + X: (differentiable) Input tensor + + alpha: Coefficient of leakage. + """ + + schema = get_schema("LeakyRelu", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "LeakyRelu", schema) + return op(*self._prepare_inputs(schema, X), alpha=alpha) + + def Log(self, input: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Log(6)](https://onnx.ai/onnx/operators/onnx__Log.html#log-6 "Online Documentation") + + + Calculates the natural log of the given input tensor, element-wise. + + + Args: + input: Input tensor + """ + + schema = get_schema("Log", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Log", schema) + return op(*self._prepare_inputs(schema, input)) + + def Max(self, *data_0: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Max(6)](https://onnx.ai/onnx/operators/onnx__Max.html#max-6 "Online Documentation") + + + Element-wise max of each of the input tensors. All inputs and outputs must + have the same shape and data type. + + + Args: + data_0: (variadic) List of tensors for Max. + """ + + schema = get_schema("Max", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Max", schema) + return op(*self._prepare_inputs(schema, *data_0)) + + def Mean(self, *data_0: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Mean(6)](https://onnx.ai/onnx/operators/onnx__Mean.html#mean-6 "Online Documentation") + + + Element-wise mean of each of the input tensors. All inputs and outputs must + have the same shape and data type. + + + Args: + data_0: (variadic) List of tensors for Mean. + """ + + schema = get_schema("Mean", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Mean", schema) + return op(*self._prepare_inputs(schema, *data_0)) + + def Min(self, *data_0: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Min(6)](https://onnx.ai/onnx/operators/onnx__Min.html#min-6 "Online Documentation") + + + Element-wise min of each of the input tensors. All inputs and outputs must + have the same shape and data type. 
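InstanceNormalization(6) above is fully specified by y = scale * (x - mean) / sqrt(variance + epsilon) + B, with statistics taken per instance, per channel. A NumPy sketch for NCHW-style inputs (helper name illustrative):

```python
import numpy as np

def instance_norm_reference(x, scale, B, epsilon=1e-5):
    spatial = tuple(range(2, x.ndim))              # reduce spatial axes only
    mean = x.mean(axis=spatial, keepdims=True)     # per instance, per channel
    var = x.var(axis=spatial, keepdims=True)
    c = (1, -1) + (1,) * (x.ndim - 2)              # align scale/B with the channel axis
    return scale.reshape(c) * (x - mean) / np.sqrt(var + epsilon) + B.reshape(c)

x = np.random.default_rng(0).standard_normal((2, 3, 4, 4))
y = instance_norm_reference(x, np.ones(3), np.zeros(3))
print(np.allclose(y.mean(axis=(2, 3)), 0.0, atol=1e-6))  # True
```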
+ + + Args: + data_0: (variadic) List of tensors for Min + """ + + schema = get_schema("Min", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Min", schema) + return op(*self._prepare_inputs(schema, *data_0)) + + def Mul( + self, + A: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + B: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axis: Optional[int] = None, + broadcast: int = 0, + ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 Mul(6)](https://onnx.ai/onnx/operators/onnx__Mul.html#mul-6 "Online Documentation") + + + Performs element-wise binary multiplication (with limited broadcast support). + + If necessary the right-hand-side argument will be broadcasted to match the + shape of left-hand-side argument. When broadcasting is specified, the second + tensor can either be of element size 1 (including a scalar tensor and any + tensor with rank equal to or smaller than the first tensor), or having its + shape as a contiguous subset of the first tensor's shape. The starting of the + mutually equal shape is specified by the argument "axis", and if it is not set, + suffix matching is assumed. 1-dim expansion doesn't work yet. + + For example, the following tensor shapes are supported (with broadcast=1): + + shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor + shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor + shape(A) = (2, 3, 4, 5), shape(B) = (5,) + shape(A) = (2, 3, 4, 5), shape(B) = (4, 5) + shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1 + shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0 + + Attribute `broadcast=1` needs to be passed to enable broadcasting. + + + Args: + A: First operand, should share the type with the second operand. + + B: Second operand. With broadcasting can be of smaller size than A. If + broadcasting is disabled it should be of the same size. + + axis: If set, defines the broadcast dimensions. See doc for details. + + broadcast: Pass 1 to enable broadcasting + """ + + schema = get_schema("Mul", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op( + self, "Mul", schema + ) + return op(*self._prepare_inputs(schema, A, B), axis=axis, broadcast=broadcast) + + def Neg( + self, X: Union[DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8] + ) -> Union[DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8]: + r"""[🌐 Neg(6)](https://onnx.ai/onnx/operators/onnx__Neg.html#neg-6 "Online Documentation") + + + Neg takes one input data (Tensor) and produces one output data + (Tensor) where each element flipped sign, y = -x, is applied to + the tensor elementwise. + + + Args: + X: Input tensor + """ + + schema = get_schema("Neg", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8]] = Op( + self, "Neg", schema + ) + return op(*self._prepare_inputs(schema, X)) + + def PRelu( + self, X: Union[DOUBLE, FLOAT, FLOAT16], slope: Union[DOUBLE, FLOAT, FLOAT16] + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 PRelu(6)](https://onnx.ai/onnx/operators/onnx__PRelu.html#prelu-6 "Online Documentation") + + + + PRelu takes input data (Tensor) and slope tensor as input, and produces one + output data (Tensor) where the function `f(x) = slope * x for x < 0`, + `f(x) = x for x >= 0`., is applied to the data tensor elementwise. + + + + Args: + X: Input tensor + + slope: Slope tensor. 
If `slope` is of size 1, the value is shared across + different channels + """ + + schema = get_schema("PRelu", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "PRelu", schema) + return op(*self._prepare_inputs(schema, X, slope)) + + def Reciprocal(self, X: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Reciprocal(6)](https://onnx.ai/onnx/operators/onnx__Reciprocal.html#reciprocal-6 "Online Documentation") + + + Reciprocal takes one input data (Tensor) and produces one output data + (Tensor) where the reciprocal is, y = 1/x, is applied to + the tensor elementwise. + + + Args: + X: Input tensor + """ + + schema = get_schema("Reciprocal", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Reciprocal", schema) + return op(*self._prepare_inputs(schema, X)) + + def Relu(self, X: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Relu(6)](https://onnx.ai/onnx/operators/onnx__Relu.html#relu-6 "Online Documentation") + + + Relu takes one input data (Tensor) and produces one output data + (Tensor) where the rectified linear function, y = max(0, x), is applied to + the tensor elementwise. + + + Args: + X: Input tensor + """ + + schema = get_schema("Relu", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Relu", schema) + return op(*self._prepare_inputs(schema, X)) + + def Selu( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + alpha: float = 1.6732631921768188, + gamma: float = 1.0507010221481323, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Selu(6)](https://onnx.ai/onnx/operators/onnx__Selu.html#selu-6 "Online Documentation") + + + Selu takes one input data (Tensor) and produces one output data + (Tensor) where the scaled exponential linear unit function, + `y = gamma * (alpha * e^x - alpha) for x <= 0`, `y = gamma * x for x > 0`, + is applied to the tensor elementwise. + + + Args: + X: (differentiable) Input tensor + + alpha: Coefficient of SELU default to 1.67326319217681884765625 (i.e., + float32 approximation of 1.6732632423543772848170429916717). + + gamma: Coefficient of SELU default to 1.05070102214813232421875 (i.e., + float32 approximation of 1.0507009873554804934193349852946). + """ + + schema = get_schema("Selu", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Selu", schema) + return op(*self._prepare_inputs(schema, X), alpha=alpha, gamma=gamma) + + def Sigmoid(self, X: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Sigmoid(6)](https://onnx.ai/onnx/operators/onnx__Sigmoid.html#sigmoid-6 "Online Documentation") + + + Sigmoid takes one input data (Tensor) and produces one output data + (Tensor) where the sigmoid function, y = 1 / (1 + exp(-x)), is applied to the + tensor elementwise. + + + Args: + X: Input tensor + """ + + schema = get_schema("Sigmoid", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Sigmoid", schema) + return op(*self._prepare_inputs(schema, X)) + + def Sqrt(self, X: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Sqrt(6)](https://onnx.ai/onnx/operators/onnx__Sqrt.html#sqrt-6 "Online Documentation") + + + Square root takes one input data (Tensor) and produces one output data + (Tensor) where the square root is, y = x^0.5, is applied to + the tensor elementwise. If x is negative, then it will return NaN.
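Selu(6) above, with the documented float32 default coefficients, as a short NumPy check; note x = 0 yields 0 on either branch, so grouping it with x <= 0 is harmless:

```python
import numpy as np

def selu6_reference(x, alpha=1.6732631921768188, gamma=1.0507010221481323):
    # y = gamma * (alpha * e^x - alpha) for x <= 0, y = gamma * x for x > 0
    return np.where(x > 0, gamma * x, gamma * (alpha * np.exp(x) - alpha))

print(selu6_reference(np.array([-1.0, 0.0, 2.0])))
```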
+ + + Args: + X: Input tensor + """ + + schema = get_schema("Sqrt", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Sqrt", schema) + return op(*self._prepare_inputs(schema, X)) + + def Sub( + self, + A: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + B: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + axis: Optional[int] = None, + broadcast: int = 0, + ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 Sub(6)](https://onnx.ai/onnx/operators/onnx__Sub.html#sub-6 "Online Documentation") + + + Performs element-wise binary subtraction (with limited broadcast support). + + If necessary the right-hand-side argument will be broadcasted to match the + shape of left-hand-side argument. When broadcasting is specified, the second + tensor can either be of element size 1 (including a scalar tensor and any + tensor with rank equal to or smaller than the first tensor), or having its + shape as a contiguous subset of the first tensor's shape. The starting of the + mutually equal shape is specified by the argument "axis", and if it is not set, + suffix matching is assumed. 1-dim expansion doesn't work yet. + + For example, the following tensor shapes are supported (with broadcast=1): + + shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor + shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor + shape(A) = (2, 3, 4, 5), shape(B) = (5,) + shape(A) = (2, 3, 4, 5), shape(B) = (4, 5) + shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1 + shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0 + + Attribute `broadcast=1` needs to be passed to enable broadcasting. + + + Args: + A: First operand, should share the type with the second operand. + + B: Second operand. With broadcasting can be of smaller size than A. If + broadcasting is disabled it should be of the same size. + + axis: If set, defines the broadcast dimensions. See doc for details. + + broadcast: Pass 1 to enable broadcasting + """ + + schema = get_schema("Sub", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op( + self, "Sub", schema + ) + return op(*self._prepare_inputs(schema, A, B), axis=axis, broadcast=broadcast) + + def Sum(self, *data_0: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Sum(6)](https://onnx.ai/onnx/operators/onnx__Sum.html#sum-6 "Online Documentation") + + + Element-wise sum of each of the input tensors. All inputs and outputs must + have the same shape and data type. + + + Args: + data_0: (variadic) List of tensors for Sum. + """ + + schema = get_schema("Sum", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Sum", schema) + return op(*self._prepare_inputs(schema, *data_0)) + + def Tanh(self, input: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Tanh(6)](https://onnx.ai/onnx/operators/onnx__Tanh.html#tanh-6 "Online Documentation") + + + Calculates the hyperbolic tangent of the given input tensor element-wise. 
+ + + Args: + input: Input tensor + """ + + schema = get_schema("Tanh", 6, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Tanh", schema) + return op(*self._prepare_inputs(schema, input)) + + def Tile( + self, + input: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + repeats: INT64, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Tile(6)](https://onnx.ai/onnx/operators/onnx__Tile.html#tile-6 "Online Documentation") + + Constructs a tensor by tiling a given tensor. + This is the same as function `tile` in Numpy, but no broadcast. + For example A = [[1, 2], [3, 4]], B = [1, 2], tile(A, B) = [[1, 2, 1, 2], [3, 4, 3, 4]] + + + Args: + input: Input tensor of any shape. + + repeats: 1D int64 tensor of the same length as input's dimension number, + includes numbers of repeated copies along input's dimensions. + """ + + schema = get_schema("Tile", 6, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Tile", schema) + return op(*self._prepare_inputs(schema, input, repeats)) diff --git a/onnxscript/onnx_opset/_impl/opset7.py b/onnxscript/onnx_opset/_impl/opset7.py new file mode 100644 index 0000000000..3aa6528fde --- /dev/null +++ b/onnxscript/onnx_opset/_impl/opset7.py @@ -0,0 +1,1256 @@ +# -------------------------------------------------------------------------- +# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ +# ⚙️ Generated by 'python -m opgen' +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +# flake8: noqa +# mypy: disable-error-code=override +# pylint: disable=W0221,W0222,W0237,W0246,R0901 +# -------------------------------------------------------------------------- + +from typing import Callable, Optional, Sequence, Tuple, Union + +from onnx.defs import get_schema + +from onnxscript.onnx_opset._impl.opset6 import Opset6 +from onnxscript.onnx_types import ( + BOOL, + COMPLEX64, + COMPLEX128, + DOUBLE, + FLOAT, + FLOAT16, + INT8, + INT16, + INT32, + INT64, + STRING, + UINT8, + UINT16, + UINT32, + UINT64, +) +from onnxscript.values import Op, Opset + + +class Opset7(Opset6): + def __new__(cls): + return Opset.__new__(cls, "", 7) + + def __init__(self): + super().__init__() + + def Acos(self, input: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Acos(7)](https://onnx.ai/onnx/operators/onnx__Acos.html#acos-7 "Online Documentation") + + + Calculates the arccosine (inverse of cosine) of the given input tensor, element-wise. 
+ + + Args: + input: (differentiable) Input tensor + """ + + schema = get_schema("Acos", 7, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Acos", schema) + return op(*self._prepare_inputs(schema, input)) + + def Add( + self, + A: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + B: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 Add(7)](https://onnx.ai/onnx/operators/onnx__Add.html#add-7 "Online Documentation") + + + Performs element-wise binary addition (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: First operand. + + B: Second operand. + """ + + schema = get_schema("Add", 7, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op( + self, "Add", schema + ) + return op(*self._prepare_inputs(schema, A, B)) + + def And(self, A: BOOL, B: BOOL) -> BOOL: + r"""[🌐 And(7)](https://onnx.ai/onnx/operators/onnx__And.html#and-7 "Online Documentation") + + + Returns the tensor resulted from performing the `and` logical operation + elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: (non-differentiable) First input operand for the logical operator. + + B: (non-differentiable) Second input operand for the logical operator. + """ + + schema = get_schema("And", 7, "") + op: Callable[..., BOOL] = Op(self, "And", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def Asin(self, input: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Asin(7)](https://onnx.ai/onnx/operators/onnx__Asin.html#asin-7 "Online Documentation") + + + Calculates the arcsine (inverse of sine) of the given input tensor, element-wise. + + + Args: + input: (differentiable) Input tensor + """ + + schema = get_schema("Asin", 7, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Asin", schema) + return op(*self._prepare_inputs(schema, input)) + + def Atan(self, input: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Atan(7)](https://onnx.ai/onnx/operators/onnx__Atan.html#atan-7 "Online Documentation") + + + Calculates the arctangent (inverse of tangent) of the given input tensor, element-wise. + + + Args: + input: (differentiable) Input tensor + """ + + schema = get_schema("Atan", 7, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Atan", schema) + return op(*self._prepare_inputs(schema, input)) + + def AveragePool( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + auto_pad: str = "NOTSET", + count_include_pad: int = 0, + kernel_shape: Optional[Sequence[int]] = None, + pads: Optional[Sequence[int]] = None, + strides: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 AveragePool(7)](https://onnx.ai/onnx/operators/onnx__AveragePool.html#averagepool-7 "Online Documentation") + + + AveragePool consumes an input tensor X and applies average pooling across + the tensor according to kernel sizes, stride sizes, and pad lengths. + average pooling consisting of computing the average on all values of a + subset of the input tensor according to the kernel size and downsampling the + data into the output tensor Y for further processing. 
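From opset 7 onward, the binary and logical ops above switch to multidirectional (NumPy-style) broadcasting, so the opset-6 `axis`/`broadcast` attributes disappear and the earlier "1-dim expansion doesn't work yet" restriction is lifted. Plain NumPy reproduces the rule:

```python
import numpy as np

A = np.ones((2, 3, 4, 5))
B = np.ones((3, 1, 5))   # needs 1-dim expansion, which opset-6 broadcasting rejected
print((A + B).shape)     # (2, 3, 4, 5), by ordinary NumPy broadcasting
```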
The output spatial shape will be the following: + ``` + output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1) + + * pad_shape[i] is the sum of pads along axis i + ``` + + `auto_pad` is a DEPRECATED attribute. If you are still using it, the output spatial shape will be the following: + ``` + VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i]) + SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i]) + ``` + And the pad shape will be the following if `SAME_UPPER` or `SAME_LOWER`: + ``` + pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i] + ``` + The output of each pooling window is divided by the number of elements (excluding pad elements when attribute count_include_pad is zero). + + + Args: + X: Input data tensor from the previous operator; dimensions for image case + are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. For non + image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), + where N is the batch size. Optionally, if dimension denotation is in + effect, the operation expects the input data tensor to arrive with the + dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, + DATA_FEATURE ...]. + + auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. + The default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER means pad the input so that the output spatial + size matches the input. In case of an odd number, add the extra padding at the + end for SAME_UPPER and at the beginning for SAME_LOWER. VALID means no + padding. + + count_include_pad: Whether to include pad pixels when calculating values for + the edges. Default is 0 (pad pixels are not counted). + + kernel_shape: The size of the kernel along each axis. + + pads: Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The values represent the + number of pixels added to the beginning and end part of the + corresponding axis. `pads` format should be as follows: [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels + added at the beginning of axis `i` and xi_end is the number of pixels + added at the end of axis `i`. This attribute cannot be used + simultaneously with the auto_pad attribute. If not present, the padding + defaults to 0 along the start and end of each spatial axis. + + strides: Stride along each spatial axis.
+ """ + + schema = get_schema("AveragePool", 7, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "AveragePool", schema) + return op( + *self._prepare_inputs(schema, X), + auto_pad=auto_pad, + count_include_pad=count_include_pad, + kernel_shape=kernel_shape, + pads=pads, + strides=strides, + ) + + def BatchNormalization( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + scale: Union[DOUBLE, FLOAT, FLOAT16], + B: Union[DOUBLE, FLOAT, FLOAT16], + mean: Union[DOUBLE, FLOAT, FLOAT16], + var: Union[DOUBLE, FLOAT, FLOAT16], + epsilon: float = 9.999999747378752e-06, + momentum: float = 0.8999999761581421, + spatial: int = 1, + ) -> Tuple[ + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + ]: + r"""[🌐 BatchNormalization(7)](https://onnx.ai/onnx/operators/onnx__BatchNormalization.html#batchnormalization-7 "Online Documentation") + + + Carries out batch normalization as described in the paper + https://arxiv.org/abs/1502.03167. Depending on the mode it is being run, + there are multiple cases for the number of outputs, which we list below: + + Output case #1: Y, mean, var, saved_mean, saved_var (training mode) + Output case #2: Y (test mode) + This operator has **optional** inputs/outputs. See `ONNX `_ for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. + + + Args: + X: Input data tensor from the previous operator; dimensions for image case + are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. For non + image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), + where N is the batch size. + + scale: If spatial is true, the dimension of scale is (C). If spatial is + false, the dimensions of scale are (C x D1 x ... x Dn) + + B: If spatial is true, the dimension of bias is (C). If spatial is false, + the dimensions of bias are (C x D1 x ... x Dn) + + mean: If spatial is true, the dimension of the running mean (training) or + the estimated mean (testing) is (C). If spatial is false, the dimensions + of the running mean (training) or the estimated mean (testing) are (C x + D1 x ... x Dn). + + var: If spatial is true, the dimension of the running variance(training) or + the estimated variance (testing) is (C). If spatial is false, the + dimensions of the running variance(training) or the estimated variance + (testing) are (C x D1 x ... x Dn). + + epsilon: The epsilon value to use to avoid division by zero. + + momentum: Factor used in computing the running mean and variance.e.g., + running_mean = running_mean * momentum + mean * (1 - momentum). + + spatial: If true, compute the mean and variance across per activation. If + false, compute the mean and variance across per feature over each + mini-batch. 
+ """ + + schema = get_schema("BatchNormalization", 7, "") + op: Callable[ + ..., + Tuple[ + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + ], + ] = Op(self, "BatchNormalization", schema) + return op( + *self._prepare_inputs(schema, X, scale, B, mean, var), + epsilon=epsilon, + momentum=momentum, + spatial=spatial, + ) + + def Cos(self, input: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Cos(7)](https://onnx.ai/onnx/operators/onnx__Cos.html#cos-7 "Online Documentation") + + + Calculates the cosine of the given input tensor, element-wise. + + + Args: + input: (differentiable) Input tensor + """ + + schema = get_schema("Cos", 7, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Cos", schema) + return op(*self._prepare_inputs(schema, input)) + + def Div( + self, + A: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + B: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 Div(7)](https://onnx.ai/onnx/operators/onnx__Div.html#div-7 "Online Documentation") + + + Performs element-wise binary division (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: First operand. + + B: Second operand. + """ + + schema = get_schema("Div", 7, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op( + self, "Div", schema + ) + return op(*self._prepare_inputs(schema, A, B)) + + def Dropout( + self, data: Union[DOUBLE, FLOAT, FLOAT16], ratio: float = 0.5 + ) -> Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]]: + r"""[🌐 Dropout(7)](https://onnx.ai/onnx/operators/onnx__Dropout.html#dropout-7 "Online Documentation") + + + Dropout takes one input data (Tensor) and produces two Tensor outputs, + output (Tensor) and mask (Tensor). Depending on whether it is in + test mode or not, the output Y will either be a random dropout, or a simple + copy of the input. Note that our implementation of Dropout does scaling in + the training phase, so during testing nothing needs to be done. + This operator has **optional** inputs/outputs. See `ONNX `_ for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. + + + Args: + data: The input data as Tensor. + + ratio: The ratio of random dropout + """ + + schema = get_schema("Dropout", 7, "") + op: Callable[ + ..., Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]] + ] = Op(self, "Dropout", schema) + return op(*self._prepare_inputs(schema, data), ratio=ratio) + + def Equal(self, A: Union[BOOL, INT32, INT64], B: Union[BOOL, INT32, INT64]) -> BOOL: + r"""[🌐 Equal(7)](https://onnx.ai/onnx/operators/onnx__Equal.html#equal-7 "Online Documentation") + + + Returns the tensor resulted from performing the `equal` logical operation + elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: First input operand for the logical operator. 
+ + B: Second input operand for the logical operator. + """ + + schema = get_schema("Equal", 7, "") + op: Callable[..., BOOL] = Op(self, "Equal", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def GRU( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + W: Union[DOUBLE, FLOAT, FLOAT16], + R: Union[DOUBLE, FLOAT, FLOAT16], + B: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + sequence_lens: Optional[INT32] = None, + initial_h: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + activation_alpha: Optional[Sequence[float]] = None, + activation_beta: Optional[Sequence[float]] = None, + activations: Optional[Sequence[str]] = None, + clip: Optional[float] = None, + direction: str = "forward", + hidden_size: Optional[int] = None, + linear_before_reset: int = 0, + ) -> Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]]: + r"""[🌐 GRU(7)](https://onnx.ai/onnx/operators/onnx__GRU.html#gru-7 "Online Documentation") + + + Computes an one-layer GRU. This operator is usually supported via some custom + implementation such as CuDNN. + + Notations: + + `X` - input tensor + + `z` - update gate + + `r` - reset gate + + `h` - hidden gate + + `t` - time step (t-1 means previous time step) + + `W[zrh]` - W parameter weight matrix for update, reset, and hidden gates + + `R[zrh]` - R recurrence weight matrix for update, reset, and hidden gates + + `Wb[zrh]` - W bias vectors for update, reset, and hidden gates + + `Rb[zrh]` - R bias vectors for update, reset, and hidden gates + + `WB[zrh]` - W parameter weight matrix for backward update, reset, and hidden gates + + `RB[zrh]` - R recurrence weight matrix for backward update, reset, and hidden gates + + `WBb[zrh]` - W bias vectors for backward update, reset, and hidden gates + + `RBb[zrh]` - R bias vectors for backward update, reset, and hidden gates + + `H` - Hidden state + + `num_directions` - 2 if direction == bidirectional else 1 + + Activation functions: + + Relu(x) - max(0, x) + + Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) + + Sigmoid(x) - 1/(1 + e^{-x}) + + (NOTE: Below are optional) + + Affine(x) - alpha*x + beta + + LeakyRelu(x) - x if x >= 0 else alpha * x + + ThresholdedRelu(x) - x if x >= alpha else 0 + + ScaledTanh(x) - alpha*Tanh(beta*x) + + HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) + + Elu(x) - x if x >= 0 else alpha*(e^x - 1) + + Softsign(x) - x/(1 + |x|) + + Softplus(x) - log(1 + e^x) + + Equations (Default: f=Sigmoid, g=Tanh): + + - zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz) + + - rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr) + + - ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset = 0 + + - ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0 + + - Ht = (1 - zt) (.) ht + zt (.) Ht-1 + This operator has **optional** inputs/outputs. See `ONNX `_ for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. + + + Args: + X: The input sequences packed (and potentially padded) into one 3-D tensor + with the shape of `[seq_length, batch_size, input_size]`. + + W: The weight tensor for the gates. Concatenation of `W[zrh]` and `WB[zrh]` + (if bidirectional) along dimension 0. This tensor has shape + `[num_directions, 3*hidden_size, input_size]`. + + R: The recurrence weight tensor. 
Concatenation of `R[zrh]` and `RB[zrh]` (if + bidirectional) along dimension 0. This tensor has shape + `[num_directions, 3*hidden_size, hidden_size]`. + + B: (optional) The bias tensor for the gates. Concatenation of `[Wb[zrh], + Rb[zrh]]` and `[WBb[zrh], RBb[zrh]]` (if bidirectional) along dimension + 0. This tensor has shape `[num_directions, 6*hidden_size]`. Optional: If + not specified - assumed to be 0 + + sequence_lens: (optional) Optional tensor specifying lengths of the + sequences in a batch. If not specified - assumed all sequences in the + batch to have length `seq_length`. It has shape `[batch_size]`. + + initial_h: (optional) Optional initial value of the hidden. If not specified + - assumed to be 0. It has shape `[num_directions, batch_size, + hidden_size]`. + + activation_alpha: Optional scaling values used by some activation functions. + The values are consumed in the order of activation functions, for + example (f, g, h) in LSTM. Default values are the same as of + corresponding ONNX operators.For example with LeakyRelu, the default + alpha is 0.01. + + activation_beta: Optional scaling values used by some activation functions. + The values are consumed in the order of activation functions, for + example (f, g, h) in LSTM. Default values are the same as of + corresponding ONNX operators. + + activations: A list of 2 (or 4 if bidirectional) activation functions for + update, reset, and hidden gates. The activation functions must be one of + the activation functions specified above. Optional: See the equations + for default if not specified. + + clip: Cell clip threshold. Clipping bounds the elements of a tensor in the + range of [-threshold, +threshold] and is applied to the input of + activations. No clip if not specified. + + direction: Specify if the RNN is forward, reverse, or bidirectional. Must be + one of forward (default), reverse, or bidirectional. + + hidden_size: Number of neurons in the hidden layer + + linear_before_reset: When computing the output of the hidden gate, apply the + linear transformation before multiplying by the output of the reset + gate. + """ + + schema = get_schema("GRU", 7, "") + op: Callable[ + ..., Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]] + ] = Op(self, "GRU", schema) + return op( + *self._prepare_inputs(schema, X, W, R, B, sequence_lens, initial_h), + activation_alpha=activation_alpha, + activation_beta=activation_beta, + activations=activations, + clip=clip, + direction=direction, + hidden_size=hidden_size, + linear_before_reset=linear_before_reset, + ) + + def Gemm( + self, + A: Union[DOUBLE, FLOAT, FLOAT16], + B: Union[DOUBLE, FLOAT, FLOAT16], + C: Union[DOUBLE, FLOAT, FLOAT16], + alpha: float = 1.0, + beta: float = 1.0, + transA: int = 0, + transB: int = 0, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Gemm(7)](https://onnx.ai/onnx/operators/onnx__Gemm.html#gemm-7 "Online Documentation") + + General Matrix multiplication: + https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3 + + A' = transpose(A) if transA else A + + B' = transpose(B) if transB else B + + Compute Y = alpha * A' * B' + beta * C, where input tensor A has shape (M, K) or (K, M), + input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), + and output tensor Y has shape (M, N). A will be transposed before doing the + computation if attribute transA is non-zero, same for B and transB. 
+ This operator supports **unidirectional broadcasting** (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check `Broadcasting in ONNX `_. + + Args: + A: Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, M) + if transA is non-zero. + + B: Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, K) + if transB is non-zero. + + C: Input tensor C. The shape of C should be unidirectional broadcastable to + (M, N). + + alpha: Scalar multiplier for the product of input tensors A * B. + + beta: Scalar multiplier for input tensor C. + + transA: Whether A should be transposed + + transB: Whether B should be transposed + """ + + schema = get_schema("Gemm", 7, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Gemm", schema) + return op( + *self._prepare_inputs(schema, A, B, C), + alpha=alpha, + beta=beta, + transA=transA, + transB=transB, + ) + + def Greater( + self, A: Union[DOUBLE, FLOAT, FLOAT16], B: Union[DOUBLE, FLOAT, FLOAT16] + ) -> BOOL: + r"""[🌐 Greater(7)](https://onnx.ai/onnx/operators/onnx__Greater.html#greater-7 "Online Documentation") + + + Returns the tensor resulted from performing the `greater` logical operation + elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: First input operand for the logical operator. + + B: Second input operand for the logical operator. + """ + + schema = get_schema("Greater", 7, "") + op: Callable[..., BOOL] = Op(self, "Greater", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def LSTM( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + W: Union[DOUBLE, FLOAT, FLOAT16], + R: Union[DOUBLE, FLOAT, FLOAT16], + B: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + sequence_lens: Optional[INT32] = None, + initial_h: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + initial_c: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + P: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + activation_alpha: Optional[Sequence[float]] = None, + activation_beta: Optional[Sequence[float]] = None, + activations: Optional[Sequence[str]] = None, + clip: Optional[float] = None, + direction: str = "forward", + hidden_size: Optional[int] = None, + input_forget: int = 0, + ) -> Tuple[ + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + ]: + r"""[🌐 LSTM(7)](https://onnx.ai/onnx/operators/onnx__LSTM.html#lstm-7 "Online Documentation") + + + Computes an one-layer LSTM. This operator is usually supported via some + custom implementation such as CuDNN. 
+ + Notations: + + `X` - input tensor + + `i` - input gate + + `o` - output gate + + `f` - forget gate + + `c` - cell gate + + `t` - time step (t-1 means previous time step) + + `W[iofc]` - W parameter weight matrix for input, output, forget, and cell gates + + `R[iofc]` - R recurrence weight matrix for input, output, forget, and cell gates + + `Wb[iofc]` - W bias vectors for input, output, forget, and cell gates + + `Rb[iofc]` - R bias vectors for input, output, forget, and cell gates + + `P[iof]` - P peephole weight vector for input, output, and forget gates + + `WB[iofc]` - W parameter weight matrix for backward input, output, forget, and cell gates + + `RB[iofc]` - R recurrence weight matrix for backward input, output, forget, and cell gates + + `WBb[iofc]` - W bias vectors for backward input, output, forget, and cell gates + + `RBb[iofc]` - R bias vectors for backward input, output, forget, and cell gates + + `PB[iof]` - P peephole weight vector for backward input, output, and forget gates + + `H` - Hidden state + + `num_directions` - 2 if direction == bidirectional else 1 + + Activation functions: + + Relu(x) - max(0, x) + + Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) + + Sigmoid(x) - 1/(1 + e^{-x}) + + (NOTE: Below are optional) + + Affine(x) - alpha*x + beta + + LeakyRelu(x) - x if x >= 0 else alpha * x + + ThresholdedRelu(x) - x if x >= alpha else 0 + + ScaledTanh(x) - alpha*Tanh(beta*x) + + HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) + + Elu(x) - x if x >= 0 else alpha*(e^x - 1) + + Softsign(x) - x/(1 + |x|) + + Softplus(x) - log(1 + e^x) + + Equations (Default: f=Sigmoid, g=Tanh, h=Tanh): + + - it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi) + + - ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf) + + - ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) + + - Ct = ft (.) Ct-1 + it (.) ct + + - ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo) + + - Ht = ot (.) h(Ct) + This operator has **optional** inputs/outputs. See `ONNX `_ for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. + + + Args: + X: The input sequences packed (and potentially padded) into one 3-D tensor + with the shape of `[seq_length, batch_size, input_size]`. + + W: The weight tensor for the gates. Concatenation of `W[iofc]` and + `WB[iofc]` (if bidirectional) along dimension 0. The tensor has shape + `[num_directions, 4*hidden_size, input_size]`. + + R: The recurrence weight tensor. Concatenation of `R[iofc]` and `RB[iofc]` + (if bidirectional) along dimension 0. This tensor has shape + `[num_directions, 4*hidden_size, hidden_size]`. + + B: (optional) The bias tensor for input gate. Concatenation of `[Wb[iofc], + Rb[iofc]]`, and `[WBb[iofc], RBb[iofc]]` (if bidirectional) along + dimension 0. This tensor has shape `[num_directions, 8*hidden_size]`. + Optional: If not specified - assumed to be 0. + + sequence_lens: (optional) Optional tensor specifying lengths of the + sequences in a batch. If not specified - assumed all sequences in the + batch to have length `seq_length`. It has shape `[batch_size]`. + + initial_h: (optional) Optional initial value of the hidden. If not specified + - assumed to be 0. It has shape `[num_directions, batch_size, + hidden_size]`. + + initial_c: (optional) Optional initial value of the cell. If not specified - + assumed to be 0. 
It has shape `[num_directions, batch_size,
+ hidden_size]`.
+
+ P: (optional) The weight tensor for peepholes. Concatenation of `P[iof]` and
+ `PB[iof]` (if bidirectional) along dimension 0. It has shape
+ `[num_directions, 3*hidden_size]`. Optional: If not specified - assumed
+ to be 0.
+
+ activation_alpha: Optional scaling values used by some activation functions.
+ The values are consumed in the order of activation functions, for
+ example (f, g, h) in LSTM. Default values are the same as of
+ corresponding ONNX operators. For example with LeakyRelu, the default
+ alpha is 0.01.
+
+ activation_beta: Optional scaling values used by some activation functions.
+ The values are consumed in the order of activation functions, for
+ example (f, g, h) in LSTM. Default values are the same as of
+ corresponding ONNX operators.
+
+ activations: A list of 3 (or 6 if bidirectional) activation functions for
+ input, output, forget, cell, and hidden. The activation functions must
+ be one of the activation functions specified above. Optional: See the
+ equations for default if not specified.
+
+ clip: Cell clip threshold. Clipping bounds the elements of a tensor in the
+ range of [-threshold, +threshold] and is applied to the input of
+ activations. No clip if not specified.
+
+ direction: Specify if the RNN is forward, reverse, or bidirectional. Must be
+ one of forward (default), reverse, or bidirectional.
+
+ hidden_size: Number of neurons in the hidden layer
+
+ input_forget: Couple the input and forget gates if 1.
+ """
+
+ schema = get_schema("LSTM", 7, "")
+ op: Callable[
+ ...,
+ Tuple[
+ Union[DOUBLE, FLOAT, FLOAT16],
+ Union[DOUBLE, FLOAT, FLOAT16],
+ Union[DOUBLE, FLOAT, FLOAT16],
+ ],
+ ] = Op(self, "LSTM", schema)
+ return op(
+ *self._prepare_inputs(schema, X, W, R, B, sequence_lens, initial_h, initial_c, P),
+ activation_alpha=activation_alpha,
+ activation_beta=activation_beta,
+ activations=activations,
+ clip=clip,
+ direction=direction,
+ hidden_size=hidden_size,
+ input_forget=input_forget,
+ )
+
+ def Less(self, A: Union[DOUBLE, FLOAT, FLOAT16], B: Union[DOUBLE, FLOAT, FLOAT16]) -> BOOL:
+ r"""[🌐 Less(7)](https://onnx.ai/onnx/operators/onnx__Less.html#less-7 "Online Documentation")
+
+
+ Returns the tensor resulted from performing the `less` logical operation
+ elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).
+
+ This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX <https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md>`_.
+
+
+ Args:
+ A: First input operand for the logical operator.
+
+ B: Second input operand for the logical operator.
+ """
+
+ schema = get_schema("Less", 7, "")
+ op: Callable[..., BOOL] = Op(self, "Less", schema)
+ return op(*self._prepare_inputs(schema, A, B))
+
+ def Mul(
+ self,
+ A: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+ B: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64],
+ ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]:
+ r"""[🌐 Mul(7)](https://onnx.ai/onnx/operators/onnx__Mul.html#mul-7 "Online Documentation")
+
+
+ Performs element-wise binary multiplication (with Numpy-style broadcasting support).
+
+ This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX <https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md>`_.
+
+
+ Args:
+ A: First operand.
+
+ B: Second operand.
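+
+ Example (editor's illustrative sketch, not emitted by opgen): the
+ generated opset methods are typically called inside an onnxscript
+ `@script()` function; this assumes the public `script` decorator and
+ `FLOAT` tensor annotation exported by `onnxscript`:
+
+ ```
+ from onnxscript import FLOAT, script
+ from onnxscript.onnx_opset import opset7 as op
+
+ @script()
+ def scale_elementwise(A: FLOAT["N"], B: FLOAT["N"]) -> FLOAT["N"]:
+     # element-wise product with Numpy-style broadcasting
+     return op.Mul(A, B)
+
+ model = scale_elementwise.to_model_proto()  # exportable ONNX ModelProto
+ ```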
+ """ + + schema = get_schema("Mul", 7, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op( + self, "Mul", schema + ) + return op(*self._prepare_inputs(schema, A, B)) + + def Multinomial( + self, + input: Union[DOUBLE, FLOAT, FLOAT16], + dtype: int = 6, + sample_size: int = 1, + seed: Optional[float] = None, + ) -> Union[INT32, INT64]: + r"""[🌐 Multinomial(7)](https://onnx.ai/onnx/operators/onnx__Multinomial.html#multinomial-7 "Online Documentation") + + + Generate a tensor of samples from a multinomial distribution according to the probabilities + of each of the possible outcomes. + + + Args: + input: Input tensor with shape [batch_size, class_size], where class_size is + the number of all possible outcomes. Each value along the axis zero + represents the unnormalized log-probability of each corresponding + outcome in a batch. + + dtype: (Optional) The data type for the elements of the output tensor, if + not specified, we will use int32. + + sample_size: Number of times to sample. + + seed: (Optional) Seed to the random generator, if not specified we will auto + generate one. + """ + + schema = get_schema("Multinomial", 7, "") + op: Callable[..., Union[INT32, INT64]] = Op(self, "Multinomial", schema) + return op( + *self._prepare_inputs(schema, input), + dtype=dtype, + sample_size=sample_size, + seed=seed, + ) + + def Or(self, A: BOOL, B: BOOL) -> BOOL: + r"""[🌐 Or(7)](https://onnx.ai/onnx/operators/onnx__Or.html#or-7 "Online Documentation") + + + Returns the tensor resulted from performing the `or` logical operation + elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: (non-differentiable) First input operand for the logical operator. + + B: (non-differentiable) Second input operand for the logical operator. + """ + + schema = get_schema("Or", 7, "") + op: Callable[..., BOOL] = Op(self, "Or", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def PRelu( + self, X: Union[DOUBLE, FLOAT, FLOAT16], slope: Union[DOUBLE, FLOAT, FLOAT16] + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 PRelu(7)](https://onnx.ai/onnx/operators/onnx__PRelu.html#prelu-7 "Online Documentation") + + + PRelu takes input data (Tensor) and slope tensor as input, and produces one + output data (Tensor) where the function `f(x) = slope * x for x < 0`, + `f(x) = x for x >= 0`., is applied to the data tensor elementwise. + This operator supports **unidirectional broadcasting** (tensor slope should be unidirectional broadcastable to input tensor X); for more details please check `Broadcasting in ONNX `_. + + Args: + X: Input tensor + + slope: Slope tensor. The shape of slope can be smaller then first input X; + if so, its shape must be unidirectional broadcastable to X + """ + + schema = get_schema("PRelu", 7, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "PRelu", schema) + return op(*self._prepare_inputs(schema, X, slope)) + + def Pow( + self, X: Union[DOUBLE, FLOAT, FLOAT16], Y: Union[DOUBLE, FLOAT, FLOAT16] + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Pow(7)](https://onnx.ai/onnx/operators/onnx__Pow.html#pow-7 "Online Documentation") + + + Pow takes input data (Tensor) and exponent Tensor, and + produces one output data (Tensor) where the function `f(x) = x^exponent`, + is applied to the data tensor elementwise. 
+ This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + Args: + X: First operand, base of the exponent. + + Y: Second operand, power of the exponent. + """ + + schema = get_schema("Pow", 7, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Pow", schema) + return op(*self._prepare_inputs(schema, X, Y)) + + def RNN( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + W: Union[DOUBLE, FLOAT, FLOAT16], + R: Union[DOUBLE, FLOAT, FLOAT16], + B: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + sequence_lens: Optional[INT32] = None, + initial_h: Optional[Union[DOUBLE, FLOAT, FLOAT16]] = None, + activation_alpha: Optional[Sequence[float]] = None, + activation_beta: Optional[Sequence[float]] = None, + activations: Sequence[str] = ("Tanh", "Tanh"), + clip: Optional[float] = None, + direction: str = "forward", + hidden_size: Optional[int] = None, + ) -> Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]]: + r"""[🌐 RNN(7)](https://onnx.ai/onnx/operators/onnx__RNN.html#rnn-7 "Online Documentation") + + + Computes an one-layer simple RNN. This operator is usually supported + via some custom implementation such as CuDNN. + + Notations: + + `X` - input tensor + + `i` - input gate + + `t` - time step (t-1 means previous time step) + + `Wi` - W parameter weight matrix for input gate + + `Ri` - R recurrence weight matrix for input gate + + `Wbi` - W parameter bias vector for input gate + + `Rbi` - R parameter bias vector for input gate + + `WBi` - W parameter weight matrix for backward input gate + + `RBi` - R recurrence weight matrix for backward input gate + + `WBbi` - WR bias vectors for backward input gate + + `RBbi` - RR bias vectors for backward input gate + + `H` - Hidden state + + `num_directions` - 2 if direction == bidirectional else 1 + + Activation functions: + + Relu(x) - max(0, x) + + Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) + + Sigmoid(x) - 1/(1 + e^{-x}) + + (NOTE: Below are optional) + + Affine(x) - alpha*x + beta + + LeakyRelu(x) - x if x >= 0 else alpha * x + + ThresholdedRelu(x) - x if x >= alpha else 0 + + ScaledTanh(x) - alpha*Tanh(beta*x) + + HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) + + Elu(x) - x if x >= 0 else alpha*(e^x - 1) + + Softsign(x) - x/(1 + |x|) + + Softplus(x) - log(1 + e^x) + + Equations (Default: f=Tanh): + + - Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi) + This operator has **optional** inputs/outputs. See `ONNX `_ for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. + + + Args: + X: The input sequences packed (and potentially padded) into one 3-D tensor + with the shape of `[seq_length, batch_size, input_size]`. + + W: The weight tensor for input gate. Concatenation of `Wi` and `WBi` (if + bidirectional). The tensor has shape `[num_directions, hidden_size, + input_size]`. + + R: The recurrence weight tensor. Concatenation of `Ri` and `RBi` (if + bidirectional). The tensor has shape `[num_directions, hidden_size, + hidden_size]`. + + B: (optional) The bias tensor for input gate. Concatenation of `[Wbi, Rbi]` + and `[WBbi, RBbi]` (if bidirectional). The tensor has shape + `[num_directions, 2*hidden_size]`. Optional: If not specified - assumed + to be 0. 
+ + sequence_lens: (optional) Optional tensor specifying lengths of the + sequences in a batch. If not specified - assumed all sequences in the + batch to have length `seq_length`. It has shape `[batch_size]`. + + initial_h: (optional) Optional initial value of the hidden. If not specified + - assumed to be 0. It has shape `[num_directions, batch_size, + hidden_size]`. + + activation_alpha: Optional scaling values used by some activation functions. + The values are consumed in the order of activation functions, for + example (f, g, h) in LSTM. Default values are the same as of + corresponding ONNX operators.For example with LeakyRelu, the default + alpha is 0.01. + + activation_beta: Optional scaling values used by some activation functions. + The values are consumed in the order of activation functions, for + example (f, g, h) in LSTM. Default values are the same as of + corresponding ONNX operators. + + activations: One (or two if bidirectional) activation function for input + gate. The activation function must be one of the activation functions + specified above. Optional: Default `Tanh` if not specified. + + clip: Cell clip threshold. Clipping bounds the elements of a tensor in the + range of [-threshold, +threshold] and is applied to the input of + activations. No clip if not specified. + + direction: Specify if the RNN is forward, reverse, or bidirectional. Must be + one of forward (default), reverse, or bidirectional. + + hidden_size: Number of neurons in the hidden layer + """ + + schema = get_schema("RNN", 7, "") + op: Callable[ + ..., Tuple[Union[DOUBLE, FLOAT, FLOAT16], Union[DOUBLE, FLOAT, FLOAT16]] + ] = Op(self, "RNN", schema) + return op( + *self._prepare_inputs(schema, X, W, R, B, sequence_lens, initial_h), + activation_alpha=activation_alpha, + activation_beta=activation_beta, + activations=activations, + clip=clip, + direction=direction, + hidden_size=hidden_size, + ) + + def Sin(self, input: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Sin(7)](https://onnx.ai/onnx/operators/onnx__Sin.html#sin-7 "Online Documentation") + + + Calculates the sine of the given input tensor, element-wise. + + + Args: + input: (differentiable) Input tensor + """ + + schema = get_schema("Sin", 7, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Sin", schema) + return op(*self._prepare_inputs(schema, input)) + + def Sub( + self, + A: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + B: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 Sub(7)](https://onnx.ai/onnx/operators/onnx__Sub.html#sub-7 "Online Documentation") + + + Performs element-wise binary subtraction (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: First operand. + + B: Second operand. + """ + + schema = get_schema("Sub", 7, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op( + self, "Sub", schema + ) + return op(*self._prepare_inputs(schema, A, B)) + + def Tan(self, input: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Tan(7)](https://onnx.ai/onnx/operators/onnx__Tan.html#tan-7 "Online Documentation") + + + Calculates the tangent of the given input tensor, element-wise. 
+ + + Args: + input: (differentiable) Input tensor + """ + + schema = get_schema("Tan", 7, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Tan", schema) + return op(*self._prepare_inputs(schema, input)) + + def Upsample( + self, + X: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + mode: str = "nearest", + scales: Optional[Sequence[float]] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Upsample(7)](https://onnx.ai/onnx/operators/onnx__Upsample.html#upsample-7 "Online Documentation") + + + Upsample the input tensor. + Each dimension value of the output tensor is: + output_dimension = floor(input_dimension * scale). + + + Args: + X: N-D tensor + + mode: Two interpolation modes: nearest (default), and linear (including + bilinear, trilinear, etc) + + scales: The scale array along each dimension. It takes value greater than or + equal to 1. The number of elements of 'scales' should be the same as the + rank of input 'X'. + """ + + schema = get_schema("Upsample", 7, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Upsample", schema) + return op(*self._prepare_inputs(schema, X), mode=mode, scales=scales) + + def Xor(self, A: BOOL, B: BOOL) -> BOOL: + r"""[🌐 Xor(7)](https://onnx.ai/onnx/operators/onnx__Xor.html#xor-7 "Online Documentation") + + + Returns the tensor resulted from performing the `xor` logical operation + elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: (non-differentiable) First input operand for the logical operator. + + B: (non-differentiable) Second input operand for the logical operator. + """ + + schema = get_schema("Xor", 7, "") + op: Callable[..., BOOL] = Op(self, "Xor", schema) + return op(*self._prepare_inputs(schema, A, B)) diff --git a/onnxscript/onnx_opset/_impl/opset8.py b/onnxscript/onnx_opset/_impl/opset8.py new file mode 100644 index 0000000000..817cbfecac --- /dev/null +++ b/onnxscript/onnx_opset/_impl/opset8.py @@ -0,0 +1,498 @@ +# -------------------------------------------------------------------------- +# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ +# ⚙️ Generated by 'python -m opgen' +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+# -------------------------------------------------------------------------- +# flake8: noqa +# mypy: disable-error-code=override +# pylint: disable=W0221,W0222,W0237,W0246,R0901 +# -------------------------------------------------------------------------- + +from typing import Callable, Optional, Sequence, Tuple, Union + +from onnx import GraphProto +from onnx.defs import get_schema + +from onnxscript.onnx_opset._impl.opset7 import Opset7 +from onnxscript.onnx_types import ( + BOOL, + COMPLEX64, + COMPLEX128, + DOUBLE, + FLOAT, + FLOAT16, + INT8, + INT16, + INT32, + INT64, + STRING, + UINT8, + UINT16, + UINT32, + UINT64, +) +from onnxscript.values import Op, Opset + + +class Opset8(Opset7): + def __new__(cls): + return Opset.__new__(cls, "", 8) + + def __init__(self): + super().__init__() + + def Expand( + self, + input: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + shape: INT64, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Expand(8)](https://onnx.ai/onnx/operators/onnx__Expand.html#expand-8 "Online Documentation") + + + Broadcast the input tensor following the given shape and the broadcast rule. + The broadcast rule is similar to numpy.array(input) * numpy.ones(shape): + Dimensions are right alignment; + Two corresponding dimensions must have the same value, or one of them is equal to 1. + Also, this operator is similar to numpy.broadcast_to(input, shape), + but the major difference is numpy.broadcast_to() does not allow shape to be smaller than input.size(). + It is possible that the output.shape is not equal to shape, when some dimensions in shape is equal to 1, + or the shape.ndim < input.shape.ndim. + + + Args: + input: Input tensor + + shape: A 1-D tensor indicates the shape you want to expand to, following the + broadcast rule + """ + + schema = get_schema("Expand", 8, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Expand", schema) + return op(*self._prepare_inputs(schema, input, shape)) + + def Max(self, *data_0: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Max(8)](https://onnx.ai/onnx/operators/onnx__Max.html#max-8 "Online Documentation") + + + Element-wise max of each of the input tensors (with Numpy-style broadcasting support). + All inputs and outputs must have the same data type. + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + data_0: (variadic) List of tensors for max. 
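+
+ Example (editor's note, not emitted by opgen): a numpy sketch of the
+ variadic, broadcasting semantics described above:
+
+ ```
+ import numpy as np
+
+ a = np.array([1.0, 5.0], dtype=np.float32)
+ b = np.array([4.0, 2.0], dtype=np.float32)
+ c = np.array([3.0], dtype=np.float32)  # broadcast to length 2
+ # Max(a, b, c) is the element-wise maximum across all inputs
+ assert np.maximum(np.maximum(a, b), c).tolist() == [4.0, 5.0]
+ ```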
+ """ + + schema = get_schema("Max", 8, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Max", schema) + return op(*self._prepare_inputs(schema, *data_0)) + + def MaxPool( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + auto_pad: str = "NOTSET", + kernel_shape: Optional[Sequence[int]] = None, + pads: Optional[Sequence[int]] = None, + storage_order: int = 0, + strides: Optional[Sequence[int]] = None, + ) -> Tuple[Union[DOUBLE, FLOAT, FLOAT16], INT64]: + r"""[🌐 MaxPool(8)](https://onnx.ai/onnx/operators/onnx__MaxPool.html#maxpool-8 "Online Documentation") + + + MaxPool consumes an input tensor X and applies max pooling across + the tensor according to kernel sizes, stride sizes, and pad lengths. + max pooling consisting of computing the max on all values of a + subset of the input tensor according to the kernel size and downsampling the + data into the output tensor Y for further processing. The output spatial shape will be following: + ``` + output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1) + + * pad_shape[i] is sum of pads along axis i + ``` + + `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following: + ``` + VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i]) + SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i]) + ``` + And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`: + ``` + pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i] + ``` + The output of each pooling window is maximum number of elements exclude pad. + + + Args: + X: Input data tensor from the previous operator; dimensions for image case + are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. For non + image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), + where N is the batch size. Optionally, if dimension denotation is in + effect, the operation expects the input data tensor to arrive with the + dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, + DATA_FEATURE ...]. + + auto_pad: auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. + Where default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial + size match the input.In case of odd number add the extra padding at the + end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no + padding. + + kernel_shape: The size of the kernel along each axis. + + pads: Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. `pads` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis `i` and xi_end, the number of pixels + added at the end of axis `i`. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. + + storage_order: The storage order of the tensor. 0 is row major, and 1 is + column major. + + strides: Stride along each spatial axis. 
+ """ + + schema = get_schema("MaxPool", 8, "") + op: Callable[..., Tuple[Union[DOUBLE, FLOAT, FLOAT16], INT64]] = Op( + self, "MaxPool", schema + ) + return op( + *self._prepare_inputs(schema, X), + auto_pad=auto_pad, + kernel_shape=kernel_shape, + pads=pads, + storage_order=storage_order, + strides=strides, + ) + + def Mean(self, *data_0: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Mean(8)](https://onnx.ai/onnx/operators/onnx__Mean.html#mean-8 "Online Documentation") + + + Element-wise mean of each of the input tensors (with Numpy-style broadcasting support). + All inputs and outputs must have the same data type. + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + data_0: (variadic) List of tensors for mean. + """ + + schema = get_schema("Mean", 8, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Mean", schema) + return op(*self._prepare_inputs(schema, *data_0)) + + def Min(self, *data_0: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Min(8)](https://onnx.ai/onnx/operators/onnx__Min.html#min-8 "Online Documentation") + + + Element-wise min of each of the input tensors (with Numpy-style broadcasting support). + All inputs and outputs must have the same data type. + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + data_0: (variadic) List of tensors for min. + """ + + schema = get_schema("Min", 8, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Min", schema) + return op(*self._prepare_inputs(schema, *data_0)) + + def Scan( + self, + sequence_lens: Optional[INT64], + *initial_state_and_scan_inputs: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + body: Optional[GraphProto] = None, + directions: Optional[Sequence[int]] = None, + num_scan_inputs: Optional[int] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Scan(8)](https://onnx.ai/onnx/operators/onnx__Scan.html#scan-8 "Online Documentation") + + + Scan can be used to iterate over one or more scan_input tensors, + constructing zero or more scan_output tensors. It combines ideas from general recurrences, + functional programming constructs such as scan, fold, map, and zip, and is intended to enable + generalizations of RNN-like constructs for sequence-to-sequence processing. + Other tensors (referred to as state_variables here) can be used to carry a state + when iterating from one element to another (similar to hidden-state in RNNs, also referred + to as loop-carried dependences in the context of loops). All these tensors are required to + have the same shape in each iteration of the loop (a restriction imposed to enable efficient + memory allocation). Many common usages involve a single scan_input tensor (where functionality + similar to scan, fold and map can be obtained). When more than one scan_input is used, + a behavior similar to zip is obtained. + + The attribute body must be a graph, specifying the computation to be performed in + every iteration. It takes as input the current values of the state_variables and + the current iterated element of the scan_inputs. 
It must return the (updated) values
+ of the state_variables and zero or more scan_output_element tensors. The values of the
+ scan_output_element tensors are concatenated over all the iterations to produce the
+ scan_output values of the scan construct (similar to the concatenated intermediate
+ hidden-state values of RNN-like constructs).
+
+ The scan operation returns the final values of the state_variables as well as the
+ scan_outputs.
+
+ The operation supports batching, and the batch-axis is required to be 0.
+ When multiple scan_input tensors are used, they must all have the same batch-size,
+ and they must all have the same maximum-sequence-length (the dimensionality of the
+ sequence axis or scan axis). The sequence axis or scan axis is required to be 1.
+
+ The operation has an optional sequence_lens input (of shape [BATCH_SIZE]) to
+ allow variable length sequences of length <= the maximum-sequence-length. If this
+ input is not specified, all sequences are assumed to be of length equal to
+ maximum-sequence-length. For variable length input sequences, the scan_outputs
+ will consist of a sequence of the same length as the input, padded to the
+ maximum-sequence-length.
+
+ The optional attribute directions can be used to scan a sequence in the reverse direction.
+ If this attribute is omitted, all sequences are scanned in the forward direction.
+ A bidirectional scan can be performed by specifying the same tensor input twice in the
+ scan_inputs, once with a forward direction, and once with a backward direction.
+
+ Note that because of the ONNX restriction that only the last parameter of an operator can
+ be variadic, the initial-states and scan-inputs are listed together as one input parameter.
+ Similarly, the final-states and scan-outputs are listed together as one output parameter.
+ The attribute num_scan_inputs indicates the number M of scan-inputs.
+
+ The behavior of
+
+ Scan <
+ num_scan_inputs = m,
+ body = loop-body
+ > (sequence_lengths, init_1, ..., init_n, scan_1, ..., scan_m)
+
+ is equivalent to the following pseudo-code:
+
+ // T.shape[0] denotes the batch-size of T
+ // The batch-size of scan_1, ..., scan_m are all required to be equal
+ batch_size = scan_1.shape[0];
+
+ // scan_i.shape[1] denotes the (max) sequence-length of scan_i
+ // scan_i.shape[1] is required to be equal to scan_j.shape[1] for all i,j.
+ max_sequence_length = scan_1.shape[1];
+
+ for (int batch = 0; batch < batch_size; ++batch) {
+ // initialize state-variables
+ st_1 = init_1; ... st_n = init_n;
+ // initialize scan-output variables: [] denotes an empty tensor
+ scan_out_1 = []; ...; scan_out_k = [];
+ // identify number of iterations:
+ N = (sequence_lengths specified) ? sequence_lengths[batch] : max_sequence_length;
+
+ // execute loop
+ for (int t = 0; t < N; ++t) {
+ // generate the scan-input elements: the notation T[t] indicates the sub-tensor
+ // of rank one less than T obtained by indexing T at position t along axis k.
+ si_1 = (scan_1[batch])[t];
+ ... ;
+ si_m = (scan_m[batch])[t];
+ // execute loop-body
+ st_1, ..., st_n, so_1, ..., so_k = loop-body(st_1, ..., st_n, si_1, ..., si_m)
+ // accumulate the scan-output elements
+ scan_out_1 = Concat(scan_out_1, so_1); ... ; scan_out_k = Concat(scan_out_k, so_k);
+ }
+ // accumulate the outputs for this batch:
+ bst_1[batch] = st_1; ..., bst_n[batch] = st_n;
+ // Note scan-outputs will have size max_sequence_length, but only first N values will be meaningful.
+ // The remaining values have an undefined value.
+ b_scan_out_1[batch] = scan_out_1; ...; b_scan_out_k[batch] = scan_out_k;
+ }
+ return bst_1, ..., bst_n, b_scan_out_1, ..., b_scan_out_k;
+
+
+
+ *Sample usage: Encoding RNN using a Scan*
+
+ The following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi,
+ recurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can
+ be encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes
+ %Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these
+ values are computed in the outer graph, they need to be passed in as extra state_variables.
+
+ graph rnn-encoding {
+ %H_0 = ...
+ %X = ...
+ %Y_h, %Y = Scan[body = <graph rnn-cell-1>, num_scan_inputs=1]("", %H_0, %X)
+ return %Y, %Y_h
+ }
+
+ graph rnn-cell-1 (
+ %H_tminus1[FLOAT, tensor]
+ %X_t[FLOAT, tensor]
+ ) {
+ %Wi = ...
+ %Ri = ...
+ %Wbi = ...
+ %Rbi = ...
+ %t1 = X_t * (Wi^T)
+ %t2 = H_tminus1*(Ri^T)
+ %t3 = Add(%t1, %t2)
+ %t4 = Add(%t3, %Wbi)
+ %t5 = Add(%t4, %Rbi)
+ %Ht = Tanh(%t5)
+ %Accumulate = Identity(%Ht)
+ return %Ht, %Accumulate
+ }
+
+
+
+ Args:
+ sequence_lens: (optional) Optional tensor specifying lengths of the
+ sequences in a batch. If this input is not specified, all sequences are
+ assumed to be of the maximum sequence length (the dimension of the
+ sequence axis of the scan_input tensors).
+
+ initial_state_and_scan_inputs: (variadic, heterogeneous) Initial values of
+ the loop's N state variables followed by M scan_inputs
+
+ body: The graph run each iteration. It has N+M inputs: (loop state
+ variables..., scan_input_elts...). It has N+K outputs: (loop state
+ variables..., scan_output_elts...). Each scan_output is created by
+ concatenating the value of the specified scan_output_elt value at the
+ end of each iteration of the loop. It is an error if the dimensions of
+ these values change across loop iterations.
+
+ directions: An optional list of M flags. The i-th element of the list
+ specifies the direction to be scanned for the i-th scan_input tensor: 0
+ indicates forward direction and 1 indicates reverse direction. If
+ omitted, all scan_input tensors will be scanned in the forward
+ direction.
+
+ num_scan_inputs: An attribute specifying the number of scan_inputs M.
+ """
+
+ schema = get_schema("Scan", 8, "")
+ op: Callable[
+ ...,
+ Union[
+ BOOL,
+ COMPLEX128,
+ COMPLEX64,
+ DOUBLE,
+ FLOAT,
+ FLOAT16,
+ INT16,
+ INT32,
+ INT64,
+ INT8,
+ STRING,
+ UINT16,
+ UINT32,
+ UINT64,
+ UINT8,
+ ],
+ ] = Op(self, "Scan", schema)
+ return op(
+ *self._prepare_inputs(schema, sequence_lens, *initial_state_and_scan_inputs),
+ body=body,
+ directions=directions,
+ num_scan_inputs=num_scan_inputs,
+ )
+
+ def Sum(self, *data_0: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]:
+ r"""[🌐 Sum(8)](https://onnx.ai/onnx/operators/onnx__Sum.html#sum-8 "Online Documentation")
+
+
+ Element-wise sum of each of the input tensors (with Numpy-style broadcasting support).
+ All inputs and outputs must have the same data type.
+ This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX <https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md>`_.
+
+
+ Args:
+ data_0: (variadic) List of tensors for sum.
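+
+ Example (editor's note, not emitted by opgen): Sum is the n-ary,
+ broadcasting analogue of repeated Add; in numpy terms:
+
+ ```
+ import numpy as np
+
+ a = np.array([[1.0], [2.0]], dtype=np.float32)  # shape (2, 1)
+ b = np.array([10.0, 20.0], dtype=np.float32)    # shape (2,)
+ # Sum(a, b) broadcasts to shape (2, 2), like a + b
+ assert (a + b).tolist() == [[11.0, 21.0], [12.0, 22.0]]
+ ```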
+ """ + + schema = get_schema("Sum", 8, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Sum", schema) + return op(*self._prepare_inputs(schema, *data_0)) diff --git a/onnxscript/onnx_opset/_impl/opset9.py b/onnxscript/onnx_opset/_impl/opset9.py new file mode 100644 index 0000000000..cba6b523e2 --- /dev/null +++ b/onnxscript/onnx_opset/_impl/opset9.py @@ -0,0 +1,1785 @@ +# -------------------------------------------------------------------------- +# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ +# ⚙️ Generated by 'python -m opgen' +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +# flake8: noqa +# mypy: disable-error-code=override +# pylint: disable=W0221,W0222,W0237,W0246,R0901 +# -------------------------------------------------------------------------- + +from typing import Callable, Optional, Sequence, Tuple, Union + +from onnx import GraphProto, TensorProto +from onnx.defs import get_schema + +from onnxscript.onnx_opset._impl.opset8 import Opset8 +from onnxscript.onnx_types import ( + BOOL, + COMPLEX64, + COMPLEX128, + DOUBLE, + FLOAT, + FLOAT16, + INT8, + INT16, + INT32, + INT64, + STRING, + UINT8, + UINT16, + UINT32, + UINT64, +) +from onnxscript.values import Op, Opset + + +class Opset9(Opset8): + def __new__(cls): + return Opset.__new__(cls, "", 9) + + def __init__(self): + super().__init__() + + def Acosh(self, input: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Acosh(9)](https://onnx.ai/onnx/operators/onnx__Acosh.html#acosh-9 "Online Documentation") + + + Calculates the hyperbolic arccosine of the given input tensor element-wise. + + + Args: + input: (differentiable) Input tensor + """ + + schema = get_schema("Acosh", 9, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Acosh", schema) + return op(*self._prepare_inputs(schema, input)) + + def Asinh(self, input: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Asinh(9)](https://onnx.ai/onnx/operators/onnx__Asinh.html#asinh-9 "Online Documentation") + + + Calculates the hyperbolic arcsine of the given input tensor element-wise. + + + Args: + input: (differentiable) Input tensor + """ + + schema = get_schema("Asinh", 9, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Asinh", schema) + return op(*self._prepare_inputs(schema, input)) + + def Atanh(self, input: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Atanh(9)](https://onnx.ai/onnx/operators/onnx__Atanh.html#atanh-9 "Online Documentation") + + + Calculates the hyperbolic arctangent of the given input tensor element-wise. 
+
+
+ Args:
+ input: (differentiable) Input tensor
+ """
+
+ schema = get_schema("Atanh", 9, "")
+ op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Atanh", schema)
+ return op(*self._prepare_inputs(schema, input))
+
+ def BatchNormalization(
+ self,
+ X: Union[DOUBLE, FLOAT, FLOAT16],
+ scale: Union[DOUBLE, FLOAT, FLOAT16],
+ B: Union[DOUBLE, FLOAT, FLOAT16],
+ mean: Union[DOUBLE, FLOAT, FLOAT16],
+ var: Union[DOUBLE, FLOAT, FLOAT16],
+ epsilon: float = 9.999999747378752e-06,
+ momentum: float = 0.8999999761581421,
+ ) -> Tuple[
+ Union[DOUBLE, FLOAT, FLOAT16],
+ Union[DOUBLE, FLOAT, FLOAT16],
+ Union[DOUBLE, FLOAT, FLOAT16],
+ Union[DOUBLE, FLOAT, FLOAT16],
+ Union[DOUBLE, FLOAT, FLOAT16],
+ ]:
+ r"""[🌐 BatchNormalization(9)](https://onnx.ai/onnx/operators/onnx__BatchNormalization.html#batchnormalization-9 "Online Documentation")
+
+
+ Carries out batch normalization as described in the paper
+ https://arxiv.org/abs/1502.03167. Depending on the mode it is being run,
+ there are multiple cases for the number of outputs, which we list below:
+
+ Output case #1: Y, mean, var, saved_mean, saved_var (training mode)
+ Output case #2: Y (test mode)
+
+ For previous (deprecated) non-spatial cases, implementors are suggested
+ to flatten the input shape to (N x C*D1*D2 ..*Dn) before a BatchNormalization Op.
+ This operator has **optional** inputs/outputs. See `ONNX <https://github.com/onnx/onnx/blob/main/docs/IR.md>`_ for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.
+
+
+ Args:
+ X: (differentiable) Input data tensor from the previous operator; dimensions
+ are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size,
+ C is the number of channels. Statistics are computed for every channel
+ of C over N and D1 to Dn dimensions. For image data, input dimensions
+ become (N x C x H x W). The op also accepts single dimension input of
+ size N in which case C is assumed to be 1
+
+ scale: (differentiable) Scale tensor of shape (C).
+
+ B: (differentiable) Bias tensor of shape (C).
+
+ mean: (differentiable) running (training) or estimated (testing) mean tensor
+ of shape (C).
+
+ var: (differentiable) running (training) or estimated (testing) variance
+ tensor of shape (C).
+
+ epsilon: The epsilon value to use to avoid division by zero.
+
+ momentum: Factor used in computing the running mean and variance, e.g.,
+ running_mean = running_mean * momentum + mean * (1 - momentum).
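+
+ Example (editor's note, not emitted by opgen): a numpy sketch of the
+ test-mode output (case #2), broadcasting per-channel statistics over
+ (N x C x ...) data:
+
+ ```
+ import numpy as np
+
+ def batchnorm_test_mode(x, scale, b, mean, var, epsilon=1e-5):
+     shape = (1, -1) + (1,) * (x.ndim - 2)  # align (C,) stats with axis 1
+     return (scale.reshape(shape) * (x - mean.reshape(shape))
+             / np.sqrt(var.reshape(shape) + epsilon) + b.reshape(shape))
+ ```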
+ """ + + schema = get_schema("BatchNormalization", 9, "") + op: Callable[ + ..., + Tuple[ + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + Union[DOUBLE, FLOAT, FLOAT16], + ], + ] = Op(self, "BatchNormalization", schema) + return op( + *self._prepare_inputs(schema, X, scale, B, mean, var), + epsilon=epsilon, + momentum=momentum, + ) + + def Cast( + self, + input: Union[ + BOOL, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + to: Optional[int] = None, + ) -> Union[ + BOOL, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Cast(9)](https://onnx.ai/onnx/operators/onnx__Cast.html#cast-9 "Online Documentation") + + + The operator casts the elements of a given input tensor to a data type + specified by the 'to' argument and returns an output tensor of the same size in + the converted type. The 'to' argument must be one of the data types specified + in the 'DataType' enum field in the TensorProto message. + + Casting from string tensor in plain (e.g., "3.14" and "1000") and scientific numeric representations + (e.g., "1e-5" and "1E8") to float types is supported. For example, converting string "100.5" to an integer may + result 100. There are some string literals reserved for special floating-point values; + "+INF" (and "INF"), "-INF", and "NaN" are positive infinity, negative infinity, and not-a-number, respectively. + Any string which can exactly match "+INF" in a case-insensitive way would be mapped to positive infinite. Similarly, + this case-insensitive rule is applied to "INF" and "NaN". When casting from numeric tensors + to string tensors, plain floating-point representation (such as "314.15926") would be used. + Converting non-numerical-literal string such as "Hello World!" is an undefined behavior. Cases + of converting string representing floating-point arithmetic value, such as "2.718", to INT is an undefined behavior. + + Conversion from a numerical type to any numerical type is always allowed. + User must be aware of precision loss and value change caused by range difference between two types. + For example, a 64-bit float 3.1415926459 may be round to a 32-bit float 3.141592. Similarly, converting + an integer 36 to Boolean may produce 1 because we truncate bits which can't be stored in the targeted type. + + + Args: + input: Input tensor to be cast. + + to: The data type to which the elements of the input tensor are cast. 
+ Strictly must be one of the types from DataType enum in TensorProto
+ """
+
+ schema = get_schema("Cast", 9, "")
+ op: Callable[
+ ...,
+ Union[
+ BOOL,
+ DOUBLE,
+ FLOAT,
+ FLOAT16,
+ INT16,
+ INT32,
+ INT64,
+ INT8,
+ STRING,
+ UINT16,
+ UINT32,
+ UINT64,
+ UINT8,
+ ],
+ ] = Op(self, "Cast", schema)
+ return op(*self._prepare_inputs(schema, input), to=to)
+
+ def Compress(
+ self,
+ input: Union[
+ BOOL,
+ COMPLEX128,
+ COMPLEX64,
+ DOUBLE,
+ FLOAT,
+ FLOAT16,
+ INT16,
+ INT32,
+ INT64,
+ INT8,
+ STRING,
+ UINT16,
+ UINT32,
+ UINT64,
+ UINT8,
+ ],
+ condition: BOOL,
+ axis: Optional[int] = None,
+ ) -> Union[
+ BOOL,
+ COMPLEX128,
+ COMPLEX64,
+ DOUBLE,
+ FLOAT,
+ FLOAT16,
+ INT16,
+ INT32,
+ INT64,
+ INT8,
+ STRING,
+ UINT16,
+ UINT32,
+ UINT64,
+ UINT8,
+ ]:
+ r"""[🌐 Compress(9)](https://onnx.ai/onnx/operators/onnx__Compress.html#compress-9 "Online Documentation")
+
+
+ Selects slices from an input tensor along a given axis where condition evaluates to True for each axis index.
+ In case axis is not provided, input is flattened before elements are selected.
+ Compress behaves like numpy.compress: https://docs.scipy.org/doc/numpy/reference/generated/numpy.compress.html
+
+
+ Args:
+ input: Tensor of rank r >= 1.
+
+ condition: Rank 1 tensor of booleans to indicate which slices or data
+ elements to be selected. Its length can be less than the input length
+ along the axis or the flattened input size if axis is not specified. In
+ such cases data slices or elements exceeding the condition length are
+ discarded.
+
+ axis: (Optional) Axis along which to take slices. If not specified, input is
+ flattened before elements are selected.
+ """
+
+ schema = get_schema("Compress", 9, "")
+ op: Callable[
+ ...,
+ Union[
+ BOOL,
+ COMPLEX128,
+ COMPLEX64,
+ DOUBLE,
+ FLOAT,
+ FLOAT16,
+ INT16,
+ INT32,
+ INT64,
+ INT8,
+ STRING,
+ UINT16,
+ UINT32,
+ UINT64,
+ UINT8,
+ ],
+ ] = Op(self, "Compress", schema)
+ return op(*self._prepare_inputs(schema, input, condition), axis=axis)
+
+ def Constant(
+ self, value: Optional[TensorProto] = None
+ ) -> Union[
+ BOOL,
+ COMPLEX128,
+ COMPLEX64,
+ DOUBLE,
+ FLOAT,
+ FLOAT16,
+ INT16,
+ INT32,
+ INT64,
+ INT8,
+ STRING,
+ UINT16,
+ UINT32,
+ UINT64,
+ UINT8,
+ ]:
+ r"""[🌐 Constant(9)](https://onnx.ai/onnx/operators/onnx__Constant.html#constant-9 "Online Documentation")
+
+ A constant tensor.
+
+ Args:
+ value: The value for the elements of the output tensor.
+ """
+
+ schema = get_schema("Constant", 9, "")
+ op: Callable[
+ ...,
+ Union[
+ BOOL,
+ COMPLEX128,
+ COMPLEX64,
+ DOUBLE,
+ FLOAT,
+ FLOAT16,
+ INT16,
+ INT32,
+ INT64,
+ INT8,
+ STRING,
+ UINT16,
+ UINT32,
+ UINT64,
+ UINT8,
+ ],
+ ] = Op(self, "Constant", schema)
+ return op(value=value)
+
+ def ConstantOfShape(
+ self, input: INT64, value: Optional[TensorProto] = None
+ ) -> Union[
+ BOOL, DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8
+ ]:
+ r"""[🌐 ConstantOfShape(9)](https://onnx.ai/onnx/operators/onnx__ConstantOfShape.html#constantofshape-9 "Online Documentation")
+
+
+ Generate a tensor with given value and shape.
+
+
+ Args:
+ input: 1D tensor. The shape of the expected output tensor. If empty tensor
+ is given, the output would be a scalar. All values must be >= 0.
+
+ value: (Optional) The value of the output elements. Should be a one-element
+ tensor. If not specified, it defaults to a tensor of value 0 and
+ datatype float32
+ """
+
+ schema = get_schema("ConstantOfShape", 9, "")
+ op: Callable[
+ ...,
+ Union[
+ BOOL,
+ DOUBLE,
+ FLOAT,
+ FLOAT16,
+ INT16,
+ INT32,
+ INT64,
+ INT8,
+ UINT16,
+ UINT32,
+ UINT64,
+ UINT8,
+ ],
+ ] = Op(self, "ConstantOfShape", schema)
+ return op(*self._prepare_inputs(schema, input), value=value)
+
+ def Cosh(self, input: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]:
+ r"""[🌐 Cosh(9)](https://onnx.ai/onnx/operators/onnx__Cosh.html#cosh-9 "Online Documentation")
+
+
+ Calculates the hyperbolic cosine of the given input tensor element-wise.
+
+
+ Args:
+ input: (differentiable) Input tensor
+ """
+
+ schema = get_schema("Cosh", 9, "")
+ op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Cosh", schema)
+ return op(*self._prepare_inputs(schema, input))
+
+ def Erf(
+ self,
+ input: Union[
+ DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8
+ ],
+ ) -> Union[
+ DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8
+ ]:
+ r"""[🌐 Erf(9)](https://onnx.ai/onnx/operators/onnx__Erf.html#erf-9 "Online Documentation")
+
+
+ Computes the error function of the given input tensor element-wise.
+
+
+ Args:
+ input: Input tensor
+ """
+
+ schema = get_schema("Erf", 9, "")
+ op: Callable[
+ ...,
+ Union[
+ DOUBLE,
+ FLOAT,
+ FLOAT16,
+ INT16,
+ INT32,
+ INT64,
+ INT8,
+ UINT16,
+ UINT32,
+ UINT64,
+ UINT8,
+ ],
+ ] = Op(self, "Erf", schema)
+ return op(*self._prepare_inputs(schema, input))
+
+ def EyeLike(
+ self,
+ input: Union[
+ BOOL,
+ DOUBLE,
+ FLOAT,
+ FLOAT16,
+ INT16,
+ INT32,
+ INT64,
+ INT8,
+ UINT16,
+ UINT32,
+ UINT64,
+ UINT8,
+ ],
+ dtype: Optional[int] = None,
+ k: int = 0,
+ ) -> Union[
+ BOOL, DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8
+ ]:
+ r"""[🌐 EyeLike(9)](https://onnx.ai/onnx/operators/onnx__EyeLike.html#eyelike-9 "Online Documentation")
+
+
+ Generate a 2D tensor (matrix) with ones on the diagonal and zeros everywhere else. Only 2D
+ tensors are supported, i.e. input T1 must be of rank 2. The shape of the output tensor is the
+ same as the input tensor. The data type can be specified by the 'dtype' argument. If
+ 'dtype' is not specified, then the type of input tensor is used. By default, the main diagonal
+ is populated with ones, but attribute 'k' can be used to populate upper or lower diagonals.
+ The 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the
+ TensorProto message and be valid as an output type.
+
+
+ Args:
+ input: 2D input tensor to copy shape, and optionally, type information from.
+
+ dtype: (Optional) The data type for the elements of the output tensor. If
+ not specified, the data type of the input tensor T1 is used. If input
+ tensor T1 is also not specified, then type defaults to 'float'.
+
+ k: (Optional) Index of the diagonal to be populated with ones. Default is 0.
+ If T2 is the output, this op sets T2[i, i+k] = 1. k = 0 populates the
+ main diagonal, k > 0 populates an upper diagonal, and k < 0 populates a
+ lower diagonal.
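+
+ Example (editor's note, not emitted by opgen): a numpy sketch of the
+ semantics when `dtype` is omitted (output type follows the input):
+
+ ```
+ import numpy as np
+
+ def eyelike(x, k=0):
+     rows, cols = x.shape  # input must be rank 2
+     return np.eye(rows, cols, k=k, dtype=x.dtype)
+
+ assert eyelike(np.zeros((2, 3)), k=1).tolist() == [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
+ ```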
+ """ + + schema = get_schema("EyeLike", 9, "") + op: Callable[ + ..., + Union[ + BOOL, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "EyeLike", schema) + return op(*self._prepare_inputs(schema, input), dtype=dtype, k=k) + + def Flatten( + self, + input: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axis: int = 1, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Flatten(9)](https://onnx.ai/onnx/operators/onnx__Flatten.html#flatten-9 "Online Documentation") + + + Flattens the input tensor into a 2D matrix. If input tensor has shape + (d_0, d_1, ... d_n) then the output will have shape + (d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn). + + + Args: + input: A tensor of rank >= axis. + + axis: Indicate up to which input dimensions (exclusive) should be flattened + to the outer dimension of the output. The value for axis must be in the + range [0, R], where R is the rank of the input tensor. When axis = 0, + the shape of the output tensor is (1, (d_0 X d_1 ... d_n), where the + shape of the input tensor is (d_0, d_1, ... d_n). + """ + + schema = get_schema("Flatten", 9, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Flatten", schema) + return op(*self._prepare_inputs(schema, input), axis=axis) + + def Gemm( + self, + A: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + B: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + C: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + alpha: float = 1.0, + beta: float = 1.0, + transA: int = 0, + transB: int = 0, + ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 Gemm(9)](https://onnx.ai/onnx/operators/onnx__Gemm.html#gemm-9 "Online Documentation") + + General Matrix multiplication: + https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3 + + A' = transpose(A) if transA else A + + B' = transpose(B) if transB else B + + Compute Y = alpha * A' * B' + beta * C, where input tensor A has shape (M, K) or (K, M), + input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), + and output tensor Y has shape (M, N). A will be transposed before doing the + computation if attribute transA is non-zero, same for B and transB. + This operator supports **unidirectional broadcasting** (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check `Broadcasting in ONNX `_. + + Args: + A: Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, M) + if transA is non-zero. + + B: Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, K) + if transB is non-zero. + + C: Input tensor C. The shape of C should be unidirectional broadcastable to + (M, N). + + alpha: Scalar multiplier for the product of input tensors A * B. + + beta: Scalar multiplier for input tensor C. 
+ + transA: Whether A should be transposed + + transB: Whether B should be transposed + """ + + schema = get_schema("Gemm", 9, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op( + self, "Gemm", schema + ) + return op( + *self._prepare_inputs(schema, A, B, C), + alpha=alpha, + beta=beta, + transA=transA, + transB=transB, + ) + + def Greater( + self, + A: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + B: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + ) -> BOOL: + r"""[🌐 Greater(9)](https://onnx.ai/onnx/operators/onnx__Greater.html#greater-9 "Online Documentation") + + + Returns the tensor resulted from performing the `greater` logical operation + elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: First input operand for the logical operator. + + B: Second input operand for the logical operator. + """ + + schema = get_schema("Greater", 9, "") + op: Callable[..., BOOL] = Op(self, "Greater", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def IsNaN(self, X: Union[DOUBLE, FLOAT, FLOAT16]) -> BOOL: + r"""[🌐 IsNaN(9)](https://onnx.ai/onnx/operators/onnx__IsNaN.html#isnan-9 "Online Documentation") + + Returns which elements of the input are NaN. + + Args: + X: input + """ + + schema = get_schema("IsNaN", 9, "") + op: Callable[..., BOOL] = Op(self, "IsNaN", schema) + return op(*self._prepare_inputs(schema, X)) + + def Less( + self, + A: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + B: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + ) -> BOOL: + r"""[🌐 Less(9)](https://onnx.ai/onnx/operators/onnx__Less.html#less-9 "Online Documentation") + + + Returns the tensor resulted from performing the `less` logical operation + elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + + Args: + A: First input operand for the logical operator. + + B: Second input operand for the logical operator. 
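+ + For example, A = [1.0, 2.0, 3.0] and B = [3.0, 2.0, 1.0] produce + output = [True, False, False].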
+ """ + + schema = get_schema("Less", 9, "") + op: Callable[..., BOOL] = Op(self, "Less", schema) + return op(*self._prepare_inputs(schema, A, B)) + + def MatMul( + self, + A: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + B: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 MatMul(9)](https://onnx.ai/onnx/operators/onnx__MatMul.html#matmul-9 "Online Documentation") + + + Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html + + + Args: + A: N-dimensional matrix A + + B: N-dimensional matrix B + """ + + schema = get_schema("MatMul", 9, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op( + self, "MatMul", schema + ) + return op(*self._prepare_inputs(schema, A, B)) + + def MaxUnpool( + self, + X: Union[DOUBLE, FLOAT, FLOAT16], + I: INT64, + output_shape: Optional[INT64] = None, + kernel_shape: Optional[Sequence[int]] = None, + pads: Optional[Sequence[int]] = None, + strides: Optional[Sequence[int]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 MaxUnpool(9)](https://onnx.ai/onnx/operators/onnx__MaxUnpool.html#maxunpool-9 "Online Documentation") + + + MaxUnpool essentially computes the partial inverse of the MaxPool op. + The input information to this op is typically the output information from a MaxPool op. The first + input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output) + from MaxPool. The second input tensor, I, contains the indices to the (locally maximal) elements corrsponding + to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op. + The third (optional) input is a tensor that specifies the output size of the unpooling operation. + + MaxUnpool is intended to do 'partial' inverse of the MaxPool op. 'Partial' because all the non-maximal + values from the original input to MaxPool are set to zero in the output of the MaxUnpool op. Pooling + the result of an unpooling operation should give back the original input to the unpooling op. + + MaxUnpool can produce the same output size for several input sizes, which makes unpooling op ambiguous. + The third input argument, output_size, is meant to disambiguate the op and produce output tensor of + known/predictable size. + + In addition to the inputs, MaxUnpool takes three attributes, namely kernel_shape, strides, and pads, + which define the exact unpooling op. The attributes typically have the same values as the corrsponding + pooling op that the unpooling op is trying to invert. + + + Args: + X: Input data tensor that has to be unpooled. This tensor is typically the + first output of the MaxPool op.Dimensions for image case are (N x C x H + x W), where N is the batch size, C is the number of channels, and H and + W are the height and the width of the data. For non-image case, the + dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the + batch size. Optionally, if dimension denotation is in effect, the + operation expects the input data tensor to arrive with the dimension + denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE + ...]. + + I: Input data tensor containing the indices corresponding to elements in the + first input tensor X.This tensor is typically the second output of the + MaxPool op.Dimensions must be the same as input tensor X. The indices + are linear, i.e. 
computed considering the tensor as flattened 1-D + tensor, assuming row-major storage. Also, the linear indices should not + consider padding. So the values in indices are in the range [0, N x C x + D1 x ... x Dn). + + output_shape: (optional) The shape of the output can be explicitly set which + will cause pads values to be auto generated. If 'output_shape' is + specified, 'pads' values are ignored. + + kernel_shape: The size of the kernel along each axis. + + pads: Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represents the + number of pixels added to the beginning and end part of the + corresponding axis. `pads` format should be as follows: [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels + added at the beginning of axis `i` and xi_end, the number of pixels + added at the end of axis `i`. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. + + strides: Stride along each spatial axis. + """ + + schema = get_schema("MaxUnpool", 9, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "MaxUnpool", schema) + return op( + *self._prepare_inputs(schema, X, I, output_shape), + kernel_shape=kernel_shape, + pads=pads, + strides=strides, + ) + + def MeanVarianceNormalization( + self, X: Union[DOUBLE, FLOAT, FLOAT16], axes: Sequence[int] = (0, 2, 3) + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 MeanVarianceNormalization(9)](https://onnx.ai/onnx/operators/onnx__MeanVarianceNormalization.html#meanvariancenormalization-9 "Online Documentation") + + + A MeanVarianceNormalization Function: Perform mean variance normalization + on the input tensor X using formula:
``` (X-EX)/sqrt(E(X-EX)^2) ``` + + + Args: + X: Input tensor + + axes: A list of integers, along which to reduce. The default is to calculate + along axes [0,2,3] for calculating mean and variance along each channel. + Two variables with the same C-coordinate are associated with the same + mean and variance. + """ + + schema = get_schema("MeanVarianceNormalization", 9, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op( + self, "MeanVarianceNormalization", schema + ) + return op(*self._prepare_inputs(schema, X), axes=axes) + + def NonZero( + self, + X: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> INT64: + r"""[🌐 NonZero(9)](https://onnx.ai/onnx/operators/onnx__NonZero.html#nonzero-9 "Online Documentation") + + + Returns the indices of the elements that are non-zero + (in row-major order - by dimension). + NonZero behaves similar to numpy.nonzero: + https://docs.scipy.org/doc/numpy/reference/generated/numpy.nonzero.html, + but for scalar input, NonZero produces output shape (0, N) instead of (1, N), which is different from Numpy's behavior. + + + Args: + X: input + """ + + schema = get_schema("NonZero", 9, "") + op: Callable[..., INT64] = Op(self, "NonZero", schema) + return op(*self._prepare_inputs(schema, X)) + + def OneHot( + self, + indices: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + depth: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + values: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axis: int = -1, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 OneHot(9)](https://onnx.ai/onnx/operators/onnx__OneHot.html#onehot-9 "Online Documentation") + + + Produces a one-hot tensor based on inputs. + The locations represented by the index values in the 'indices' input tensor will have 'on_value' + and the other locations will have 'off_value' in the output tensor, where 'on_value' and 'off_value' + are specified as part of required input argument 'values', which is a two-element tensor of format + [off_value, on_value]. The rank of the output tensor will be one greater than the rank of the + input tensor. The additional dimension is for one-hot representation. The additional dimension will + be inserted at the position specified by 'axis'. If 'axis' is not specified then the additional + dimension will be inserted as the innermost dimension, i.e. axis=-1. The size of the additional + dimension is specified by required scalar input 'depth'. The type of the output tensor is the same + as the type of the 'values' input. Any entries in the 'indices' input tensor with values outside + the range [0, depth) will result in one-hot representation with all 'off_value' values in the + output tensor. + + + Args: + indices: Input tensor containing indices. The values must be non-negative + integers. Any entries in the 'indices' input tensor with values outside + the range [0, depth) will result in one-hot representation with all + 'off_value' values in the output tensor. In case 'indices' is of + non-integer type, the values will be cast to int64 before use. + + depth: Scalar specifying the number of classes in one-hot tensor.
This is + also the size of the one-hot dimension (specified by 'axis' attribute) + added on in the output tensor. The values in the 'indices' input tensor + are expected to be in the range [0, depth). In case 'depth' is of + non-integer type, it will be cast to int64 before use. + + values: Rank 1 tensor containing exactly two elements, in the format + [off_value, on_value], where 'on_value' is the value used for filling + locations specified in 'indices' input tensor, and 'off_value' is the + value used for filling locations other than those specified in 'indices' + input tensor. + + axis: (Optional) Axis along which one-hot representation is added. Default: + axis=-1. axis=-1 means that the additional dimension will be inserted as + the innermost/last dimension in the output tensor. + """ + + schema = get_schema("OneHot", 9, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "OneHot", schema) + return op(*self._prepare_inputs(schema, indices, depth, values), axis=axis) + + def PRelu( + self, + X: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + slope: Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64], + ) -> Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]: + r"""[🌐 PRelu(9)](https://onnx.ai/onnx/operators/onnx__PRelu.html#prelu-9 "Online Documentation") + + + PRelu takes input data (Tensor) and slope tensor as input, and produces one + output data (Tensor) where the function `f(x) = slope * x for x < 0`, + `f(x) = x for x >= 0`, is applied to the data tensor elementwise. + This operator supports **unidirectional broadcasting** (tensor slope should be unidirectional broadcastable to input tensor X); for more details please check `Broadcasting in ONNX <https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md>`_. + + Args: + X: (differentiable) Input tensor + + slope: (differentiable) Slope tensor. The shape of slope can be smaller than + first input X; if so, its shape must be unidirectional broadcastable to + X + """ + + schema = get_schema("PRelu", 9, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64]] = Op( + self, "PRelu", schema + ) + return op(*self._prepare_inputs(schema, X, slope)) + + def Scan( + self, + *initial_state_and_scan_inputs: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + body: Optional[GraphProto] = None, + num_scan_inputs: Optional[int] = None, + scan_input_axes: Optional[Sequence[int]] = None, + scan_input_directions: Optional[Sequence[int]] = None, + scan_output_axes: Optional[Sequence[int]] = None, + scan_output_directions: Optional[Sequence[int]] = None, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Scan(9)](https://onnx.ai/onnx/operators/onnx__Scan.html#scan-9 "Online Documentation") + + + Scan can be used to iterate over one or more scan_input tensors, + constructing zero or more scan_output tensors. It combines ideas from general recurrences, + functional programming constructs such as scan, fold, map, and zip, and is intended to enable + generalizations of RNN-like constructs for sequence-to-sequence processing.
+ Other tensors (referred to as state_variables here) can be used to carry a state + when iterating from one element to another (similar to hidden-state in RNNs, also referred + to as loop-carried dependences in the context of loops). + Many common usages involve a single scan_input tensor (where functionality + similar to scan, fold and map can be obtained). When more than one scan_input is used, + a behavior similar to zip is obtained. + + The attribute body must be a graph, specifying the computation to be performed in + every iteration. It takes as input the current values of the state_variables and + the current iterated element of the scan_inputs. It must return the (updated) values + of the state_variables and zero or more scan_output_element tensors. The values of the + scan_output_element tensors are concatenated over all the iterations to produce the + scan_output values of the scan construct (similar to the concatenated intermediate + hidden-state values of RNN-like constructs). All the output tensors (state_variables as + well as scan_output_element tensors) are required to have the same shape in each iteration + of the loop (a restriction imposed to enable efficient memory allocation). + + Note that the iterated element passed to the body subgraph does not have a sequence + axis. It will have a rank one less than the rank of the corresponding scan_input. + + The scan operation returns the final values of the state_variables as well as the + scan_outputs. + + The optional attribute scan_input_directions specifies the direction (forward or backward) + for each scan input. If this attribute is omitted, all sequences are scanned in the forward + direction. A bidirectional scan may be performed by specifying the same tensor input twice + in the scan_inputs, once with a forward direction, and once with a backward direction. + + The scan_output of the operation is produced by concatenating the scan_output_element + values produced by the body in each iteration. The optional attribute scan_output_directions + specifies the direction in which scan_output is constructed (by appending or prepending the + scan_output_element to scan_output in each iteration) for each scan_output. If this attribute + is omitted, the scan_output_element is appended to the scan_output in each iteration. + + The optional attribute scan_input_axes specifies the axis to be scanned for each scan_input. + If omitted, every scan_input will be scanned in axis 0. For example, if axis 0 is the + batch axis and axis 1 is the time axis (to be scanned), specify an axis value of 1. + Note that scanning a non-zero axis may be less efficient than scanning axis zero. + + The optional attribute scan_output_axes specifies the axis along which the scan_outputs + are accumulated for each scan_output. For example, if axis 1 is the time axis (to be + scanned) for both inputs and outputs, specify a scan_input axis and scan_output axis + value of 1. + + Note that because of the ONNX restriction that only the last parameter of an operator can + be variadic, the initial-states and scan-inputs are listed together as one input parameter. + Similarly, the final-states and scan-outputs are listed together as one output parameter. + The attribute num_scan_inputs indicates the number M of scan-inputs. 
+ + The behavior of + + Scan < + num_scan_inputs = m, + body = loop-body, + scan_input_axes = [axis_1, ..., axis_m] + > (init_1, ..., init_n, scan_1, ..., scan_m) + + is equivalent to the following pseudo-code: + + // scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i + // scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j. + sequence_length = scan_1.shape[axis_1]; + + // initialize state-variables + st_1 = init_1; ... st_n = init_n; + // initialize scan-output variables: [] denotes an empty tensor + scan_out_1 = []; ...; scan_out_k = []; + // identify number of iterations: + + // execute loop + for (int t = 0; t < sequence_length; ++t) { + // generate the scan-input elements: the notation T[t] indicates the sub-tensor + // of rank one less than T obtained by indexing T at position t along axis k. + si_1 = scan_1[t]; + ... ; + si_m = scan_m[t]; + // execute loop-body + st_1, ..., st_n, so_1, ..., so_k = loop-body(st_1, ..., st_n, si_1, ..., si_m) + // accumulate the scan-output elements + scan_out_1 = Concat(scan_out_1, so_1); ... ; scan_out_k = Concat(scan_out_k, so_k); + } + + return st_1, ..., st_n, scan_out_1, ..., scan_out_k; + + *Sample usage: Encoding RNN using a Scan* + + The following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi, + recurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can + be encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes + %Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these + values are computed in the outer graph, they need to be passed in as extra state_variables. + + graph rnn-encoding { + %H_0 = ... + %X = ... + %Y_h, %Y = Scan[body = , num_scan_inputs=1](%H_0, %X) + return %Y, %Y_h + } + + graph rnn-cell-1 ( + %H_tminus1[FLOAT, tensor] + %X_t[FLOAT, tensor] + ) { + %Wi = ... + %Ri = ... + %Wbi = ... + %Rbi = ... + %t1 = X_t * (Wi^T) + %t2 = H_tminus1*(Ri^T) + %t3 = Add(%t1, %t2) + %t4 = Add(%t3, %Wbi) + %t5 = Add(%t4, %Rbi) + %Ht = Tanh(%t5) + %Accumulate = Identity(%Ht) + return %Ht, %Accumulate + } + + + + Args: + initial_state_and_scan_inputs: (variadic, heterogeneous) Initial values of + the loop's N state variables followed by M scan_inputs + + body: The graph run each iteration. It has N+M inputs: (loop state + variables..., scan_input_elts...). It has N+K outputs: (loop state + variables..., scan_output_elts...). Each scan_output is created by + concatenating the value of the specified scan_output_elt value at the + end of each iteration of the loop. It is an error if the dimensions of + these values change across loop iterations. + + num_scan_inputs: An attribute specifying the number of scan_inputs M. + + scan_input_axes: An optional list of M flags. The i-th element of the list + specifies the axis to be scanned (the sequence axis) for the i-th + scan_input. If omitted, 0 will be used as the scan axis for every + scan_input. + + scan_input_directions: An optional list of M flags. The i-th element of the + list specifies the direction to be scanned for the i-th scan_input + tensor: 0 indicates forward direction and 1 indicates reverse direction. + If omitted, all scan_input tensors will be scanned in the forward + direction. + + scan_output_axes: An optional list of K flags. The i-th element of the list + specifies the axis for the i-th scan_output. The scan outputs are + accumulated along the specified axis. 
If omitted, 0 will be used as the + scan axis for every scan_output. + + scan_output_directions: An optional list of K flags, one for each + scan_output. The i-th element of the list specifies whether the i-th + scan_output should be constructed by appending or prepending a new value + in each iteration: 0 indicates appending and 1 indicates prepending. If + omitted, all scan_output tensors will be produced by appending a value + in each iteration. + """ + + schema = get_schema("Scan", 9, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Scan", schema) + return op( + *self._prepare_inputs(schema, *initial_state_and_scan_inputs), + body=body, + num_scan_inputs=num_scan_inputs, + scan_input_axes=scan_input_axes, + scan_input_directions=scan_input_directions, + scan_output_axes=scan_output_axes, + scan_output_directions=scan_output_directions, + ) + + def Scatter( + self, + data: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + indices: Union[INT32, INT64], + updates: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + axis: int = 0, + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Scatter(9)](https://onnx.ai/onnx/operators/onnx__Scatter.html#scatter-9 "Online Documentation") + + + Given `data`, `updates` and `indices` input tensors of rank r >= 1, write the values provided by `updates` + into the first input, `data`, along `axis` dimension of `data` (by default outer-most one as axis=0) at corresponding `indices`. + For each entry in `updates`, the target index in `data` is specified by corresponding entry in `indices` + for dimension = axis, and index in source for dimension != axis. For instance, in a 2-D tensor case, + data[indices[i][j]][j] = updates[i][j] if axis = 0, or data[i][indices[i][j]] = updates[i][j] if axis = 1, + where i and j are loop counters from 0 up to the respective size in `updates` - 1. + Example 1: + data = [ + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + ] + indices = [ + [1, 0, 2], + [0, 2, 1], + ] + updates = [ + [1.0, 1.1, 1.2], + [2.0, 2.1, 2.2], + ] + output = [ + [2.0, 1.1, 0.0] + [1.0, 0.0, 2.2] + [0.0, 2.1, 1.2] + ] + Example 2: + data = [[1.0, 2.0, 3.0, 4.0, 5.0]] + indices = [[1, 3]] + updates = [[1.1, 2.1]] + axis = 1 + output = [[1.0, 1.1, 3.0, 2.1, 5.0]] + + + Args: + data: Tensor of rank r >= 1. + + indices: Tensor of int32/int64 indices, of r >= 1 (same rank as input). + + updates: Tensor of rank r >=1 (same rank and shape as indices) + + axis: Which axis to scatter on. Negative value means counting dimensions + from the back. 
Accepted range is [-r, r-1] + """ + + schema = get_schema("Scatter", 9, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Scatter", schema) + return op(*self._prepare_inputs(schema, data, indices, updates), axis=axis) + + def Shrink( + self, + input: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + bias: float = 0.0, + lambd: float = 0.5, + ) -> Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ]: + r"""[🌐 Shrink(9)](https://onnx.ai/onnx/operators/onnx__Shrink.html#shrink-9 "Online Documentation") + + + Shrink takes one input data (Tensor) and produces one Tensor output, + having same datatype and shape with input. It has two attributes, lambd and + bias. The formula of this operator is: If x < -lambd, y = x + bias; + If x > lambd, y = x - bias; Otherwise, y = 0. + + + Args: + input: (differentiable) The input data as Tensor. + + bias: The bias value added to output. Default is 0. + + lambd: The lambd value for the Shrink formulation. Default is 0.5. + """ + + schema = get_schema("Shrink", 9, "") + op: Callable[ + ..., + Union[ + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Shrink", schema) + return op(*self._prepare_inputs(schema, input), bias=bias, lambd=lambd) + + def Sign( + self, + input: Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ], + ) -> Union[ + DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8 + ]: + r"""[🌐 Sign(9)](https://onnx.ai/onnx/operators/onnx__Sign.html#sign-9 "Online Documentation") + + + Calculate the sign of the given input tensor element-wise. + If input > 0, output 1. if input < 0, output -1. if input == 0, output 0. + + + Args: + input: Input tensor + """ + + schema = get_schema("Sign", 9, "") + op: Callable[ + ..., + Union[ + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Sign", schema) + return op(*self._prepare_inputs(schema, input)) + + def Sinh(self, input: Union[DOUBLE, FLOAT, FLOAT16]) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 Sinh(9)](https://onnx.ai/onnx/operators/onnx__Sinh.html#sinh-9 "Online Documentation") + + + Calculates the hyperbolic sine of the given input tensor element-wise. + + + Args: + input: (differentiable) Input tensor + """ + + schema = get_schema("Sinh", 9, "") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Sinh", schema) + return op(*self._prepare_inputs(schema, input)) + + def TfIdfVectorizer( + self, + X: Union[INT32, INT64, STRING], + max_gram_length: Optional[int] = None, + max_skip_count: Optional[int] = None, + min_gram_length: Optional[int] = None, + mode: Optional[str] = None, + ngram_counts: Optional[Sequence[int]] = None, + ngram_indexes: Optional[Sequence[int]] = None, + pool_int64s: Optional[Sequence[int]] = None, + pool_strings: Optional[Sequence[str]] = None, + weights: Optional[Sequence[float]] = None, + ) -> FLOAT: + r"""[🌐 TfIdfVectorizer(9)](https://onnx.ai/onnx/operators/onnx__TfIdfVectorizer.html#tfidfvectorizer-9 "Online Documentation") + + + This transform extracts n-grams from the input sequence and save them as a vector. Input can + be either a 1-D or 2-D tensor. 
For 1-D input, output is the n-gram representation of that input. + For 2-D input, the output is also a 2-D tensor whose i-th row is the n-gram representation of the i-th input row. + More specifically, if input shape is [C], the corresponding output shape would be [max(ngram_indexes) + 1]. + If input shape is [N, C], this operator produces a [N, max(ngram_indexes) + 1]-tensor. + + In contrast to standard n-gram extraction, here, the indexes of extracting an n-gram from the original + sequence are not necessarily consecutive numbers. The discontinuity between indexes is controlled by the number of skips. + If the number of skips is 2, we should skip two tokens when scanning through the original sequence. + Let's consider an example. Assume that input sequence is [94, 17, 36, 12, 28] and the number of skips is 2. + The associated 2-grams are [94, 12] and [17, 28] respectively indexed by [0, 3] and [1, 4]. + If the number of skips becomes 0, the 2-grams generated are [94, 17], [17, 36], [36, 12], [12, 28] + indexed by [0, 1], [1, 2], [2, 3], [3, 4], respectively. + + The output vector (denoted by Y) stores the count of each n-gram; + Y[ngram_indexes[i]] indicates the number of times that the i-th n-gram is found. The attribute ngram_indexes is used to determine the mapping + between index i and the corresponding n-gram's output coordinate. If pool_int64s is [94, 17, 17, 36], ngram_indexes is [1, 0], + ngram_counts=[0, 0], then the Y[0] (first element in Y) and Y[1] (second element in Y) are the counts of [17, 36] and [94, 17], + respectively. An n-gram which cannot be found in pool_strings/pool_int64s should be ignored and has no effect on the output. + Note that we may consider all skips up to S when generating the n-grams. + + The examples used above are true if mode is "TF". If mode is "IDF", all the counts larger than 1 would be truncated to 1 and + the i-th element in weights would be used to scale (by multiplication) the count of the i-th n-gram in pool. If mode is "TFIDF", + this operator first computes the counts of all n-grams and then scales them by the associated values in the weights attribute. + + Only one of pool_strings and pool_int64s can be set. If pool_int64s is set, the input should be an integer tensor. + If pool_strings is set, the input must be a string tensor. + + + Args: + X: (non-differentiable) Input for n-gram extraction + + max_gram_length: Maximum n-gram length. If this value is 3, 3-grams will be + used to generate the output. + + max_skip_count: Maximum number of items (integers/strings) to be skipped + when constructing an n-gram from X. If max_skip_count=1, + min_gram_length=2, max_gram_length=3, this operator may generate 2-grams + with skip_count=0 and skip_count=1, and 3-grams with skip_count=0 and + skip_count=1 + + min_gram_length: Minimum n-gram length. If this value is 2 and + max_gram_length is 3, output may contain counts of 2-grams and 3-grams. + + mode: The weighting criteria. It can be one of "TF" (term frequency), "IDF" + (inverse document frequency), and "TFIDF" (the combination of TF and + IDF) + + ngram_counts: The starting indexes of 1-grams, 2-grams, and so on in pool. + It is useful when determining the boundary between two consecutive + collections of n-grams. For example, if ngram_counts is [0, 17, 36], the + first index (zero-based) of 1-gram/2-gram/3-gram in pool are 0/17/36. + This format is essentially identical to CSR (or CSC) sparse matrix + format, and we choose to use this due to its popularity.
+ + ngram_indexes: list of int64s (type: AttributeProto::INTS). This list is + parallel to the specified 'pool_*' attribute. The i-th element in + ngram_indexes indicates the coordinate of the i-th n-gram in the output + tensor. + + pool_int64s: List of int64 n-grams learned from the training set. Either + this or pool_strings attributes must be present but not both. It's a + 1-D tensor starting with the collections of all 1-grams and ending with + the collections of n-grams. The i-th element in pool stores the n-gram + that should be mapped to coordinate ngram_indexes[i] in the output + vector. + + pool_strings: List of string n-grams learned from the training set. Either + this or pool_int64s attributes must be present but not both. It's a 1-D + tensor starting with the collections of all 1-grams and ending with the + collections of n-grams. The i-th element in pool stores the n-gram that + should be mapped to coordinate ngram_indexes[i] in the output vector. + + weights: list of floats. This attribute stores the weight of each n-gram in + pool. The i-th element in weights is the weight of the i-th n-gram in + pool. Its length equals the size of ngram_indexes. By default, + weights is an all-one tensor. This attribute is used when mode is "IDF" + or "TFIDF" to scale the associated word counts. + """ + + schema = get_schema("TfIdfVectorizer", 9, "") + op: Callable[..., FLOAT] = Op(self, "TfIdfVectorizer", schema) + return op( + *self._prepare_inputs(schema, X), + max_gram_length=max_gram_length, + max_skip_count=max_skip_count, + min_gram_length=min_gram_length, + mode=mode, + ngram_counts=ngram_counts, + ngram_indexes=ngram_indexes, + pool_int64s=pool_int64s, + pool_strings=pool_strings, + weights=weights, + ) + + def Upsample( + self, + X: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + scales: FLOAT, + mode: str = "nearest", + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Upsample(9)](https://onnx.ai/onnx/operators/onnx__Upsample.html#upsample-9 "Online Documentation") + + + Upsample the input tensor. + Each dimension value of the output tensor is: + output_dimension = floor(input_dimension * scale). + + + Args: + X: N-D tensor + + scales: The scale array along each dimension. It takes value greater than or + equal to 1. The number of elements of 'scales' should be the same as the + rank of input 'X'.
+ + mode: Two interpolation modes: nearest (default), and linear (including + bilinear, trilinear, etc) + """ + + schema = get_schema("Upsample", 9, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Upsample", schema) + return op(*self._prepare_inputs(schema, X, scales), mode=mode) + + def Where( + self, + condition: BOOL, + X: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + Y: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ) -> Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ]: + r"""[🌐 Where(9)](https://onnx.ai/onnx/operators/onnx__Where.html#where-9 "Online Documentation") + + + Return elements, either from X or Y, depending on condition. + Where behaves like + [numpy.where](https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html) + with three parameters. + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check `Broadcasting in ONNX `_. + + Args: + condition: (non-differentiable) When True (nonzero), yield X, otherwise + yield Y + + X: (differentiable) values selected at indices where condition is True + + Y: (differentiable) values selected at indices where condition is False + """ + + schema = get_schema("Where", 9, "") + op: Callable[ + ..., + Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + ] = Op(self, "Where", schema) + return op(*self._prepare_inputs(schema, condition, X, Y)) diff --git a/onnxscript/onnx_opset/_impl/opset_ai_onnx_ml1.py b/onnxscript/onnx_opset/_impl/opset_ai_onnx_ml1.py new file mode 100644 index 0000000000..c65ef054da --- /dev/null +++ b/onnxscript/onnx_opset/_impl/opset_ai_onnx_ml1.py @@ -0,0 +1,920 @@ +# -------------------------------------------------------------------------- +# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ +# ⚙️ Generated by 'python -m opgen' +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+# -------------------------------------------------------------------------- +# flake8: noqa +# mypy: disable-error-code=override +# pylint: disable=W0221,W0222,W0237,W0246,R0901 +# -------------------------------------------------------------------------- + +from typing import Callable, Mapping, Optional, Sequence, Tuple, Union + +from onnx.defs import get_schema + +from onnxscript.onnx_types import DOUBLE, FLOAT, INT32, INT64, STRING +from onnxscript.values import Op, Opset + + +class Opset_ai_onnx_ml1(Opset): + def __new__(cls): + return Opset.__new__(cls, "ai.onnx.ml", 1) + + def __init__(self): + super().__init__() + + def ArrayFeatureExtractor( + self, X: Union[DOUBLE, FLOAT, INT32, INT64, STRING], Y: INT64 + ) -> Union[DOUBLE, FLOAT, INT32, INT64, STRING]: + r"""[🌐 ai.onnx.ml::ArrayFeatureExtractor(1)](https://onnx.ai/onnx/operators/onnx_aionnxml_ArrayFeatureExtractor.html#arrayfeatureextractor-1 "Online Documentation") + + + Select elements of the input tensor based on the indices passed. + + The indices are applied to the last axes of the tensor. + + + Args: + X: Data to be selected + + Y: The indices, based on 0 as the first index of any dimension. + """ + + schema = get_schema("ArrayFeatureExtractor", 1, "ai.onnx.ml") + op: Callable[..., Union[DOUBLE, FLOAT, INT32, INT64, STRING]] = Op( + self, "ArrayFeatureExtractor", schema + ) + return op(*self._prepare_inputs(schema, X, Y)) + + def Binarizer( + self, X: Union[DOUBLE, FLOAT, INT32, INT64], threshold: float = 0.0 + ) -> Union[DOUBLE, FLOAT, INT32, INT64]: + r"""[🌐 ai.onnx.ml::Binarizer(1)](https://onnx.ai/onnx/operators/onnx_aionnxml_Binarizer.html#binarizer-1 "Online Documentation") + + + Maps the values of the input tensor to either 0 or 1, element-wise, based on the outcome of a comparison against a threshold value. + + + Args: + X: Data to be binarized + + threshold: Values greater than this are mapped to 1, others to 0. + """ + + schema = get_schema("Binarizer", 1, "ai.onnx.ml") + op: Callable[..., Union[DOUBLE, FLOAT, INT32, INT64]] = Op(self, "Binarizer", schema) + return op(*self._prepare_inputs(schema, X), threshold=threshold) + + def CastMap( + self, + X: Union[Mapping[int, FLOAT], Mapping[int, STRING]], + cast_to: str = "TO_FLOAT", + map_form: str = "DENSE", + max_map: int = 1, + ) -> Union[FLOAT, INT64, STRING]: + r"""[🌐 ai.onnx.ml::CastMap(1)](https://onnx.ai/onnx/operators/onnx_aionnxml_CastMap.html#castmap-1 "Online Documentation") + + + Converts a map to a tensor. + The map key must be an int64 and the values will be ordered + in ascending order based on this key. + The operator supports dense packing or sparse packing. + If using sparse packing, the key cannot exceed the max_map-1 value. + + + Args: + X: The input map that is to be cast to a tensor + + cast_to: A string indicating the desired element type of the output tensor, + one of 'TO_FLOAT', 'TO_STRING', 'TO_INT64'. + + map_form: Indicates whether to only output as many values as are in the + input (dense), or position the input based on using the key of the map + as the index of the output (sparse).
One of 'DENSE', 'SPARSE'. + + max_map: If the value of map_form is 'SPARSE,' this attribute indicates the + total length of the output tensor. + """ + + schema = get_schema("CastMap", 1, "ai.onnx.ml") + op: Callable[..., Union[FLOAT, INT64, STRING]] = Op(self, "CastMap", schema) + return op( + *self._prepare_inputs(schema, X), + cast_to=cast_to, + map_form=map_form, + max_map=max_map, + ) + + def CategoryMapper( + self, + X: Union[INT64, STRING], + cats_int64s: Optional[Sequence[int]] = None, + cats_strings: Optional[Sequence[str]] = None, + default_int64: int = -1, + default_string: str = "_Unused", + ) -> Union[INT64, STRING]: + r"""[🌐 ai.onnx.ml::CategoryMapper(1)](https://onnx.ai/onnx/operators/onnx_aionnxml_CategoryMapper.html#categorymapper-1 "Online Documentation") + + + Converts strings to integers and vice versa. + + Two sequences of equal length are used to map between integers and strings, + with strings and integers at the same index detailing the mapping. + + Each operator converts either integers to strings or strings to integers, depending + on which default value attribute is provided. Only one default value attribute + should be defined. + + If the string default value is set, it will convert integers to strings. + If the int default value is set, it will convert strings to integers. + + + Args: + X: Input data + + cats_int64s: The integers of the map. This sequence must be the same length + as the 'cats_strings' sequence. + + cats_strings: The strings of the map. This sequence must be the same length + as the 'cats_int64s' sequence + + default_int64: An integer to use when an input string value is not found in + the map.
One and only one of the 'default_*' attributes must be + defined. + + default_string: A string to use when an input integer value is not found in + the map.
One and only one of the 'default_*' attributes must be + defined. + """ + + schema = get_schema("CategoryMapper", 1, "ai.onnx.ml") + op: Callable[..., Union[INT64, STRING]] = Op(self, "CategoryMapper", schema) + return op( + *self._prepare_inputs(schema, X), + cats_int64s=cats_int64s, + cats_strings=cats_strings, + default_int64=default_int64, + default_string=default_string, + ) + + def DictVectorizer( + self, + X: Union[ + Mapping[int, DOUBLE], + Mapping[int, FLOAT], + Mapping[int, STRING], + Mapping[str, DOUBLE], + Mapping[str, FLOAT], + Mapping[str, INT64], + ], + int64_vocabulary: Optional[Sequence[int]] = None, + string_vocabulary: Optional[Sequence[str]] = None, + ) -> Union[DOUBLE, FLOAT, INT64, STRING]: + r"""[🌐 ai.onnx.ml::DictVectorizer(1)](https://onnx.ai/onnx/operators/onnx_aionnxml_DictVectorizer.html#dictvectorizer-1 "Online Documentation") + + + Uses an index mapping to convert a dictionary to an array. + + Given a dictionary, each key is looked up in the vocabulary attribute corresponding to + the key type. The index into the vocabulary array at which the key is found is then + used to index the output 1-D tensor 'Y' and insert into it the value found in the dictionary 'X'. + + The key type of the input map must correspond to the element type of the defined vocabulary attribute. + Therefore, the output array will be equal in length to the index mapping vector parameter. + All keys in the input dictionary must be present in the index mapping vector. + For each item in the input dictionary, insert its value in the output array. + Any keys not present in the input dictionary, will be zero in the output array. + + For example: if the ``string_vocabulary`` parameter is set to ``["a", "c", "b", "z"]``, + then an input of ``{"a": 4, "c": 8}`` will produce an output of ``[4, 8, 0, 0]``. + + + Args: + X: A dictionary. + + int64_vocabulary: An integer vocabulary array.
One and only one of the + vocabularies must be defined. + + string_vocabulary: A string vocabulary array.
One and only one of the + vocabularies must be defined. + """ + + schema = get_schema("DictVectorizer", 1, "ai.onnx.ml") + op: Callable[..., Union[DOUBLE, FLOAT, INT64, STRING]] = Op( + self, "DictVectorizer", schema + ) + return op( + *self._prepare_inputs(schema, X), + int64_vocabulary=int64_vocabulary, + string_vocabulary=string_vocabulary, + ) + + def FeatureVectorizer( + self, + *X: Union[DOUBLE, FLOAT, INT32, INT64], + inputdimensions: Optional[Sequence[int]] = None, + ) -> FLOAT: + r"""[🌐 ai.onnx.ml::FeatureVectorizer(1)](https://onnx.ai/onnx/operators/onnx_aionnxml_FeatureVectorizer.html#featurevectorizer-1 "Online Documentation") + + + Concatenates input tensors into one continuous output. + + All input shapes are 2-D and are concatenated along the second dimension. 1-D tensors are treated as [1,C]. + Inputs are copied to the output maintaining the order of the input arguments. + + All inputs must be integers or floats, while the output will be all floating point values. + + + Args: + X: (variadic) An ordered collection of tensors, all with the same element + type. + + inputdimensions: The size of each input in the input list + """ + + schema = get_schema("FeatureVectorizer", 1, "ai.onnx.ml") + op: Callable[..., FLOAT] = Op(self, "FeatureVectorizer", schema) + return op(*self._prepare_inputs(schema, *X), inputdimensions=inputdimensions) + + def Imputer( + self, + X: Union[DOUBLE, FLOAT, INT32, INT64], + imputed_value_floats: Optional[Sequence[float]] = None, + imputed_value_int64s: Optional[Sequence[int]] = None, + replaced_value_float: float = 0.0, + replaced_value_int64: int = 0, + ) -> Union[DOUBLE, FLOAT, INT32, INT64]: + r"""[🌐 ai.onnx.ml::Imputer(1)](https://onnx.ai/onnx/operators/onnx_aionnxml_Imputer.html#imputer-1 "Online Documentation") + + + Replaces inputs that equal one value with another, leaving all other elements alone. + + This operator is typically used to replace missing values in situations where they have a canonical + representation, such as -1, 0, NaN, or some extreme value. + + One and only one of imputed_value_floats or imputed_value_int64s should be defined -- floats if the input tensor + holds floats, integers if the input tensor holds integers. The imputed values must all fit within the + width of the tensor element type. One and only one of the replaced_value_float or replaced_value_int64 should be defined, + which one depends on whether floats or integers are being processed. + + The imputed_value attribute length can be 1 element, or it can have one element per input feature. + In other words, if the input tensor has the shape [*,F], then the length of the attribute array may be 1 or F. If it is 1, then it is broadcast along the last dimension and applied to each feature. + + + Args: + X: Data to be processed. + + imputed_value_floats: Value(s) to change to. + + imputed_value_int64s: Value(s) to change to. + + replaced_value_float: A value that needs replacing. + + replaced_value_int64: A value that needs replacing.
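+ + For example, with replaced_value_float=-1.0 and imputed_value_floats=[0.5], an + input X = [1.0, -1.0, 2.0] produces [1.0, 0.5, 2.0].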
+ """ + + schema = get_schema("Imputer", 1, "ai.onnx.ml") + op: Callable[..., Union[DOUBLE, FLOAT, INT32, INT64]] = Op(self, "Imputer", schema) + return op( + *self._prepare_inputs(schema, X), + imputed_value_floats=imputed_value_floats, + imputed_value_int64s=imputed_value_int64s, + replaced_value_float=replaced_value_float, + replaced_value_int64=replaced_value_int64, + ) + + def LabelEncoder( + self, + X: Union[INT64, STRING], + classes_strings: Optional[Sequence[str]] = None, + default_int64: int = -1, + default_string: str = "_Unused", + ) -> Union[INT64, STRING]: + r"""[🌐 ai.onnx.ml::LabelEncoder(1)](https://onnx.ai/onnx/operators/onnx_aionnxml_LabelEncoder.html#labelencoder-1 "Online Documentation") + + + Converts strings to integers and vice versa. + + If the string default value is set, it will convert integers to strings. + If the int default value is set, it will convert strings to integers. + + Each operator converts either integers to strings or strings to integers, depending + on which default value attribute is provided. Only one default value attribute + should be defined. + + When converting from integers to strings, the string is fetched from the + 'classes_strings' list, by simple indexing. + + When converting from strings to integers, the string is looked up in the list + and the index at which it is found is used as the converted value. + + + Args: + X: Input data. + + classes_strings: A list of labels. + + default_int64: An integer to use when an input string value is not found in + the map.
One and only one of the 'default_*' attributes must be + defined. + + default_string: A string to use when an input integer value is not found in + the map.
One and only one of the 'default_*' attributes must be + defined. + """ + + schema = get_schema("LabelEncoder", 1, "ai.onnx.ml") + op: Callable[..., Union[INT64, STRING]] = Op(self, "LabelEncoder", schema) + return op( + *self._prepare_inputs(schema, X), + classes_strings=classes_strings, + default_int64=default_int64, + default_string=default_string, + ) + + def LinearClassifier( + self, + X: Union[DOUBLE, FLOAT, INT32, INT64], + classlabels_ints: Optional[Sequence[int]] = None, + classlabels_strings: Optional[Sequence[str]] = None, + coefficients: Optional[Sequence[float]] = None, + intercepts: Optional[Sequence[float]] = None, + multi_class: int = 0, + post_transform: str = "NONE", + ) -> Tuple[Union[INT64, STRING], FLOAT]: + r"""[🌐 ai.onnx.ml::LinearClassifier(1)](https://onnx.ai/onnx/operators/onnx_aionnxml_LinearClassifier.html#linearclassifier-1 "Online Documentation") + + + Linear classifier + + + Args: + X: Data to be classified. + + classlabels_ints: Class labels when using integer labels. One and only one + 'classlabels' attribute must be defined. + + classlabels_strings: Class labels when using string labels. One and only one + 'classlabels' attribute must be defined. + + coefficients: A collection of weights of the model(s). + + intercepts: A collection of intercepts. + + multi_class: Indicates whether to do OvR or multinomial (0=OvR is the + default). + + post_transform: Indicates the transform to apply to the scores + vector.
One of 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or + 'PROBIT' + """ + + schema = get_schema("LinearClassifier", 1, "ai.onnx.ml") + op: Callable[..., Tuple[Union[INT64, STRING], FLOAT]] = Op( + self, "LinearClassifier", schema + ) + return op( + *self._prepare_inputs(schema, X), + classlabels_ints=classlabels_ints, + classlabels_strings=classlabels_strings, + coefficients=coefficients, + intercepts=intercepts, + multi_class=multi_class, + post_transform=post_transform, + ) + + def LinearRegressor( + self, + X: Union[DOUBLE, FLOAT, INT32, INT64], + coefficients: Optional[Sequence[float]] = None, + intercepts: Optional[Sequence[float]] = None, + post_transform: str = "NONE", + targets: int = 1, + ) -> FLOAT: + r"""[🌐 ai.onnx.ml::LinearRegressor(1)](https://onnx.ai/onnx/operators/onnx_aionnxml_LinearRegressor.html#linearregressor-1 "Online Documentation") + + + Generalized linear regression evaluation. + + If targets is set to 1 (default) then univariate regression is performed. + + If targets is set to M then M sets of coefficients must be passed in as a sequence + and M results will be output for each input n in N. + + The coefficients array is of length n, and the coefficients for each target are contiguous. + Intercepts are optional but if provided must match the number of targets. + + + Args: + X: Data to be regressed. + + coefficients: Weights of the model(s). + + intercepts: Weights of the intercepts, if used. + + post_transform: Indicates the transform to apply to the regression output + vector.
One of 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or + 'PROBIT' + + targets: The total number of regression targets, 1 if not defined. + """ + + schema = get_schema("LinearRegressor", 1, "ai.onnx.ml") + op: Callable[..., FLOAT] = Op(self, "LinearRegressor", schema) + return op( + *self._prepare_inputs(schema, X), + coefficients=coefficients, + intercepts=intercepts, + post_transform=post_transform, + targets=targets, + ) + + def Normalizer(self, X: Union[DOUBLE, FLOAT, INT32, INT64], norm: str = "MAX") -> FLOAT: + r"""[🌐 ai.onnx.ml::Normalizer(1)](https://onnx.ai/onnx/operators/onnx_aionnxml_Normalizer.html#normalizer-1 "Online Documentation") + + + Normalize the input. There are three normalization modes, which have the corresponding formulas, + defined using element-wise infix operators '/' and '^' and tensor-wide functions 'max' and 'sum': + + + + Max: Y = X / max(X) + + L1: Y = X / sum(X) + + L2: Y = sqrt(X^2 / sum(X^2)} + + In all modes, if the divisor is zero, Y == X. + + + For batches, that is, [N,C] tensors, normalization is done along the C axis. In other words, each row + of the batch is normalized independently. + + + Args: + X: Data to be encoded, a tensor of shape [N,C] or [C] + + norm: One of 'MAX,' 'L1,' 'L2' + """ + + schema = get_schema("Normalizer", 1, "ai.onnx.ml") + op: Callable[..., FLOAT] = Op(self, "Normalizer", schema) + return op(*self._prepare_inputs(schema, X), norm=norm) + + def OneHotEncoder( + self, + X: Union[DOUBLE, FLOAT, INT32, INT64, STRING], + cats_int64s: Optional[Sequence[int]] = None, + cats_strings: Optional[Sequence[str]] = None, + zeros: int = 1, + ) -> FLOAT: + r"""[🌐 ai.onnx.ml::OneHotEncoder(1)](https://onnx.ai/onnx/operators/onnx_aionnxml_OneHotEncoder.html#onehotencoder-1 "Online Documentation") + + + Replace each input element with an array of ones and zeros, where a single + one is placed at the index of the category that was passed in. The total category count + will determine the size of the extra dimension of the output array Y. + + For example, if we pass a tensor with a single value of 4, and a category count of 8, + the output will be a tensor with ``[0,0,0,0,1,0,0,0]``. + + This operator assumes every input feature is from the same set of categories. + + If the input is a tensor of float, int32, or double, the data will be cast + to integers and the cats_int64s category list will be used for the lookups. + + + Args: + X: Data to be encoded. + + cats_int64s: List of categories, ints.
One and only one of the 'cats_*' + attributes must be defined. + + cats_strings: List of categories, strings.
One and only one of the + 'cats_*' attributes must be defined. + + zeros: If true and a category is not present, will return all zeros; if false + and a category is not found, the operator will fail. + """ + + schema = get_schema("OneHotEncoder", 1, "ai.onnx.ml") + op: Callable[..., FLOAT] = Op(self, "OneHotEncoder", schema) + return op( + *self._prepare_inputs(schema, X), + cats_int64s=cats_int64s, + cats_strings=cats_strings, + zeros=zeros, + ) + + def SVMClassifier( + self, + X: Union[DOUBLE, FLOAT, INT32, INT64], + classlabels_ints: Optional[Sequence[int]] = None, + classlabels_strings: Optional[Sequence[str]] = None, + coefficients: Optional[Sequence[float]] = None, + kernel_params: Optional[Sequence[float]] = None, + kernel_type: str = "LINEAR", + post_transform: str = "NONE", + prob_a: Optional[Sequence[float]] = None, + prob_b: Optional[Sequence[float]] = None, + rho: Optional[Sequence[float]] = None, + support_vectors: Optional[Sequence[float]] = None, + vectors_per_class: Optional[Sequence[int]] = None, + ) -> Tuple[Union[INT64, STRING], FLOAT]: + r"""[🌐 ai.onnx.ml::SVMClassifier(1)](https://onnx.ai/onnx/operators/onnx_aionnxml_SVMClassifier.html#svmclassifier-1 "Online Documentation") + + + Support Vector Machine classifier + + + Args: + X: Data to be classified. + + classlabels_ints: Class labels if using integer labels.
One and only one + of the 'classlabels_*' attributes must be defined. + + classlabels_strings: Class labels if using string labels.
One and only + one of the 'classlabels_*' attributes must be defined. + + kernel_params: List of 3 elements containing gamma, coef0, and degree, in + that order. Zero if unused for the kernel. + + kernel_type: The kernel type, one of 'LINEAR,' 'POLY,' 'RBF,' 'SIGMOID'. + + post_transform: Indicates the transform to apply to the score.
One of + 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT' + + prob_a: First set of probability coefficients. + + prob_b: Second set of probability coefficients. This array must be same size + as prob_a.
If these are provided then the output Z contains probability + estimates; otherwise it contains raw scores. + """ + + schema = get_schema("SVMClassifier", 1, "ai.onnx.ml") + op: Callable[..., Tuple[Union[INT64, STRING], FLOAT]] = Op( + self, "SVMClassifier", schema + ) + return op( + *self._prepare_inputs(schema, X), + classlabels_ints=classlabels_ints, + classlabels_strings=classlabels_strings, + coefficients=coefficients, + kernel_params=kernel_params, + kernel_type=kernel_type, + post_transform=post_transform, + prob_a=prob_a, + prob_b=prob_b, + rho=rho, + support_vectors=support_vectors, + vectors_per_class=vectors_per_class, + ) + + def SVMRegressor( + self, + X: Union[DOUBLE, FLOAT, INT32, INT64], + coefficients: Optional[Sequence[float]] = None, + kernel_params: Optional[Sequence[float]] = None, + kernel_type: str = "LINEAR", + n_supports: int = 0, + one_class: int = 0, + post_transform: str = "NONE", + rho: Optional[Sequence[float]] = None, + support_vectors: Optional[Sequence[float]] = None, + ) -> FLOAT: + r"""[🌐 ai.onnx.ml::SVMRegressor(1)](https://onnx.ai/onnx/operators/onnx_aionnxml_SVMRegressor.html#svmregressor-1 "Online Documentation") + + + Support Vector Machine regression prediction and one-class SVM anomaly detection. + + + Args: + X: Data to be regressed. + + coefficients: Support vector coefficients. + + kernel_params: List of 3 elements containing gamma, coef0, and degree, in + that order. Zero if unused for the kernel. + + kernel_type: The kernel type, one of 'LINEAR,' 'POLY,' 'RBF,' 'SIGMOID'. + + n_supports: The number of support vectors. + + one_class: Flag indicating whether the regression is a one-class SVM or not. + + post_transform: Indicates the transform to apply to the score.
One of + 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT.' + + support_vectors: Chosen support vectors + """ + + schema = get_schema("SVMRegressor", 1, "ai.onnx.ml") + op: Callable[..., FLOAT] = Op(self, "SVMRegressor", schema) + return op( + *self._prepare_inputs(schema, X), + coefficients=coefficients, + kernel_params=kernel_params, + kernel_type=kernel_type, + n_supports=n_supports, + one_class=one_class, + post_transform=post_transform, + rho=rho, + support_vectors=support_vectors, + ) + + def Scaler( + self, + X: Union[DOUBLE, FLOAT, INT32, INT64], + offset: Optional[Sequence[float]] = None, + scale: Optional[Sequence[float]] = None, + ) -> FLOAT: + r"""[🌐 ai.onnx.ml::Scaler(1)](https://onnx.ai/onnx/operators/onnx_aionnxml_Scaler.html#scaler-1 "Online Documentation") + + + Rescale input data, for example to standardize features by removing the mean and scaling to unit variance. + + + Args: + X: Data to be scaled. + + offset: First, offset by this.
Can be length of features in an [N,F] + tensor or length 1, in which case it applies to all features, regardless + of dimension count. + + scale: Second, multiply by this.
Can be length of features in an [N,F] + tensor or length 1, in which case it applies to all features, regardless + of dimension count.
Must be same length as 'offset' + """ + + schema = get_schema("Scaler", 1, "ai.onnx.ml") + op: Callable[..., FLOAT] = Op(self, "Scaler", schema) + return op(*self._prepare_inputs(schema, X), offset=offset, scale=scale) + + def TreeEnsembleClassifier( + self, + X: Union[DOUBLE, FLOAT, INT32, INT64], + base_values: Optional[Sequence[float]] = None, + class_ids: Optional[Sequence[int]] = None, + class_nodeids: Optional[Sequence[int]] = None, + class_treeids: Optional[Sequence[int]] = None, + class_weights: Optional[Sequence[float]] = None, + classlabels_int64s: Optional[Sequence[int]] = None, + classlabels_strings: Optional[Sequence[str]] = None, + nodes_falsenodeids: Optional[Sequence[int]] = None, + nodes_featureids: Optional[Sequence[int]] = None, + nodes_hitrates: Optional[Sequence[float]] = None, + nodes_missing_value_tracks_true: Optional[Sequence[int]] = None, + nodes_modes: Optional[Sequence[str]] = None, + nodes_nodeids: Optional[Sequence[int]] = None, + nodes_treeids: Optional[Sequence[int]] = None, + nodes_truenodeids: Optional[Sequence[int]] = None, + nodes_values: Optional[Sequence[float]] = None, + post_transform: str = "NONE", + ) -> Tuple[Union[INT64, STRING], FLOAT]: + r"""[🌐 ai.onnx.ml::TreeEnsembleClassifier(1)](https://onnx.ai/onnx/operators/onnx_aionnxml_TreeEnsembleClassifier.html#treeensembleclassifier-1 "Online Documentation") + + + Tree Ensemble classifier. Returns the top class for each of N inputs. + + The attributes named 'nodes_X' form a sequence of tuples, associated by + index into the sequences, which must all be of equal length. These tuples + define the nodes. + + Similarly, all fields prefixed with 'class_' are tuples of votes at the leaves. + A leaf may have multiple votes, where each vote is weighted by + the associated class_weights index. + + One and only one of classlabels_strings or classlabels_int64s + will be defined. The class_ids are indices into this list. + + + Args: + X: Input of shape [N,F] + + base_values: Base values for classification, added to final class score; the + size must be the same as the classes or can be left unassigned (assumed + 0) + + class_ids: The index of the class list that each weight is for. + + class_nodeids: node id that this weight is for. + + class_treeids: The id of the tree that this node is in. + + class_weights: The weight for the class in class_id. + + classlabels_int64s: Class labels if using integer labels.
One and only + one of the 'classlabels_*' attributes must be defined. + + classlabels_strings: Class labels if using string labels.
One and only + one of the 'classlabels_*' attributes must be defined. + + nodes_falsenodeids: Child node if expression is false. + + nodes_featureids: Feature id for each node. + + nodes_hitrates: Popularity of each node, used for performance and may be + omitted. + + nodes_missing_value_tracks_true: For each node, define what to do in the + presence of a missing value: if a value is missing (NaN), use the 'true' + or 'false' branch based on the value in this array.
This attribute + may be left undefined, and the default value is false (0) for all nodes. + + nodes_modes: The node kind, that is, the comparison to make at the node. + There is no comparison to make at a leaf node.
One of 'BRANCH_LEQ', + 'BRANCH_LT', 'BRANCH_GTE', 'BRANCH_GT', 'BRANCH_EQ', 'BRANCH_NEQ', + 'LEAF' + + nodes_nodeids: Node id for each node. Ids may restart at zero for each tree, + but it is not required to. + + nodes_treeids: Tree id for each node. + + nodes_truenodeids: Child node if expression is true. + + nodes_values: Thresholds to do the splitting on for each node. + + post_transform: Indicates the transform to apply to the score.
One of + 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT.' + """ + + schema = get_schema("TreeEnsembleClassifier", 1, "ai.onnx.ml") + op: Callable[..., Tuple[Union[INT64, STRING], FLOAT]] = Op( + self, "TreeEnsembleClassifier", schema + ) + return op( + *self._prepare_inputs(schema, X), + base_values=base_values, + class_ids=class_ids, + class_nodeids=class_nodeids, + class_treeids=class_treeids, + class_weights=class_weights, + classlabels_int64s=classlabels_int64s, + classlabels_strings=classlabels_strings, + nodes_falsenodeids=nodes_falsenodeids, + nodes_featureids=nodes_featureids, + nodes_hitrates=nodes_hitrates, + nodes_missing_value_tracks_true=nodes_missing_value_tracks_true, + nodes_modes=nodes_modes, + nodes_nodeids=nodes_nodeids, + nodes_treeids=nodes_treeids, + nodes_truenodeids=nodes_truenodeids, + nodes_values=nodes_values, + post_transform=post_transform, + ) + + def TreeEnsembleRegressor( + self, + X: Union[DOUBLE, FLOAT, INT32, INT64], + aggregate_function: str = "SUM", + base_values: Optional[Sequence[float]] = None, + n_targets: Optional[int] = None, + nodes_falsenodeids: Optional[Sequence[int]] = None, + nodes_featureids: Optional[Sequence[int]] = None, + nodes_hitrates: Optional[Sequence[float]] = None, + nodes_missing_value_tracks_true: Optional[Sequence[int]] = None, + nodes_modes: Optional[Sequence[str]] = None, + nodes_nodeids: Optional[Sequence[int]] = None, + nodes_treeids: Optional[Sequence[int]] = None, + nodes_truenodeids: Optional[Sequence[int]] = None, + nodes_values: Optional[Sequence[float]] = None, + post_transform: str = "NONE", + target_ids: Optional[Sequence[int]] = None, + target_nodeids: Optional[Sequence[int]] = None, + target_treeids: Optional[Sequence[int]] = None, + target_weights: Optional[Sequence[float]] = None, + ) -> FLOAT: + r"""[🌐 ai.onnx.ml::TreeEnsembleRegressor(1)](https://onnx.ai/onnx/operators/onnx_aionnxml_TreeEnsembleRegressor.html#treeensembleregressor-1 "Online Documentation") + + + Tree Ensemble regressor. Returns the regressed values for each input in N. + + All args with nodes_ are fields of a tuple of tree nodes, and + it is assumed they are the same length, and an index i will decode the + tuple across these inputs. Each node id can appear only once + for each tree id. + + All fields prefixed with target_ are tuples of votes at the leaves. + + A leaf may have multiple votes, where each vote is weighted by + the associated target_weights index. + + All trees must have their node ids start at 0 and increment by 1. + + Mode enum is BRANCH_LEQ, BRANCH_LT, BRANCH_GTE, BRANCH_GT, BRANCH_EQ, BRANCH_NEQ, LEAF + + + Args: + X: Input of shape [N,F] + + aggregate_function: Defines how to aggregate leaf values within a target. +
One of 'AVERAGE,' 'SUM,' 'MIN,' 'MAX.' + + base_values: Base values for classification, added to final class score; the + size must be the same as the classes or can be left unassigned (assumed + 0) + + n_targets: The total number of targets. + + nodes_falsenodeids: Child node if expression is false + + nodes_featureids: Feature id for each node. + + nodes_hitrates: Popularity of each node, used for performance and may be + omitted. + + nodes_missing_value_tracks_true: For each node, define what to do in the + presence of a NaN: use the 'true' (if the attribute value is 1) or + 'false' (if the attribute value is 0) branch based on the value in this + array.
This attribute may be left undefined and the default value is + false (0) for all nodes. + + nodes_modes: The node kind, that is, the comparison to make at the node. + There is no comparison to make at a leaf node.
One of 'BRANCH_LEQ', + 'BRANCH_LT', 'BRANCH_GTE', 'BRANCH_GT', 'BRANCH_EQ', 'BRANCH_NEQ', + 'LEAF' + + nodes_nodeids: Node id for each node. Node ids must restart at zero for each + tree and increase sequentially. + + nodes_treeids: Tree id for each node. + + nodes_truenodeids: Child node if expression is true + + nodes_values: Thresholds to do the splitting on for each node. + + post_transform: Indicates the transform to apply to the score.
One of + 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT' + + target_ids: The index of the target that each weight is for + + target_nodeids: The node id of each weight + + target_treeids: The id of the tree that each node is in. + + target_weights: The weight for each target + """ + + schema = get_schema("TreeEnsembleRegressor", 1, "ai.onnx.ml") + op: Callable[..., FLOAT] = Op(self, "TreeEnsembleRegressor", schema) + return op( + *self._prepare_inputs(schema, X), + aggregate_function=aggregate_function, + base_values=base_values, + n_targets=n_targets, + nodes_falsenodeids=nodes_falsenodeids, + nodes_featureids=nodes_featureids, + nodes_hitrates=nodes_hitrates, + nodes_missing_value_tracks_true=nodes_missing_value_tracks_true, + nodes_modes=nodes_modes, + nodes_nodeids=nodes_nodeids, + nodes_treeids=nodes_treeids, + nodes_truenodeids=nodes_truenodeids, + nodes_values=nodes_values, + post_transform=post_transform, + target_ids=target_ids, + target_nodeids=target_nodeids, + target_treeids=target_treeids, + target_weights=target_weights, + ) + + def ZipMap( + self, + X: FLOAT, + classlabels_int64s: Optional[Sequence[int]] = None, + classlabels_strings: Optional[Sequence[str]] = None, + ) -> Union[Sequence[Mapping[int, FLOAT]], Sequence[Mapping[str, FLOAT]]]: + r"""[🌐 ai.onnx.ml::ZipMap(1)](https://onnx.ai/onnx/operators/onnx_aionnxml_ZipMap.html#zipmap-1 "Online Documentation") + + + Creates a map from the input and the attributes. + + The values are provided by the input tensor, while the keys are specified by the attributes. + Must provide keys in either classlabels_strings or classlabels_int64s (but not both). + + The columns of the tensor correspond one-by-one to the keys specified by the attributes. There must be as many columns as keys. + + + + Args: + X: The input values + + classlabels_int64s: The keys when using int keys.
One and only one of the + 'classlabels_*' attributes must be defined. + + classlabels_strings: The keys when using string keys.
One and only one of + the 'classlabels_*' attributes must be defined. + """ + + schema = get_schema("ZipMap", 1, "ai.onnx.ml") + op: Callable[ + ..., Union[Sequence[Mapping[int, FLOAT]], Sequence[Mapping[str, FLOAT]]] + ] = Op(self, "ZipMap", schema) + return op( + *self._prepare_inputs(schema, X), + classlabels_int64s=classlabels_int64s, + classlabels_strings=classlabels_strings, + ) diff --git a/onnxscript/onnx_opset/_impl/opset_ai_onnx_ml2.py b/onnxscript/onnx_opset/_impl/opset_ai_onnx_ml2.py new file mode 100644 index 0000000000..1a3ab6fcb8 --- /dev/null +++ b/onnxscript/onnx_opset/_impl/opset_ai_onnx_ml2.py @@ -0,0 +1,106 @@ +# -------------------------------------------------------------------------- +# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ +# ⚙️ Generated by 'python -m opgen' +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +# flake8: noqa +# mypy: disable-error-code=override +# pylint: disable=W0221,W0222,W0237,W0246,R0901 +# -------------------------------------------------------------------------- + +from typing import Callable, Optional, Sequence, Union + +from onnx.defs import get_schema + +from onnxscript.onnx_opset._impl.opset_ai_onnx_ml1 import Opset_ai_onnx_ml1 +from onnxscript.onnx_types import FLOAT, INT64, STRING +from onnxscript.values import Op, Opset + + +class Opset_ai_onnx_ml2(Opset_ai_onnx_ml1): + def __new__(cls): + return Opset.__new__(cls, "ai.onnx.ml", 2) + + def __init__(self): + super().__init__() + + def LabelEncoder( + self, + X: Union[FLOAT, INT64, STRING], + default_float: float = -0.0, + default_int64: int = -1, + default_string: str = "_Unused", + keys_floats: Optional[Sequence[float]] = None, + keys_int64s: Optional[Sequence[int]] = None, + keys_strings: Optional[Sequence[str]] = None, + values_floats: Optional[Sequence[float]] = None, + values_int64s: Optional[Sequence[int]] = None, + values_strings: Optional[Sequence[str]] = None, + ) -> Union[FLOAT, INT64, STRING]: + r"""[🌐 ai.onnx.ml::LabelEncoder(2)](https://onnx.ai/onnx/operators/onnx_aionnxml_LabelEncoder.html#labelencoder-2 "Online Documentation") + + + Maps each element in the input tensor to another value. + + The mapping is determined by two parallel attributes, the 'keys_*' and + 'values_*' attributes. The i-th value in the specified 'keys_*' attribute + would be mapped to the i-th value in the specified 'values_*' attribute. This + implies that the input's element type and the element type of the specified + 'keys_*' should be identical while the output type is identical to the + specified 'values_*' attribute. If an input element cannot be found in the + specified 'keys_*' attribute, the 'default_*' that matches the specified + 'values_*' attribute may be used as its output value. + + Let's consider an example which maps a string tensor to an integer tensor. + Assume 'keys_strings' is ["Amy", "Sally"], 'values_int64s' is [5, 6], + and 'default_int64' is '-1'. The input ["Dori", "Amy", "Amy", "Sally", + "Sally"] would be mapped to [-1, 5, 5, 6, 6]. + + Since this operator is a one-to-one mapping, its input and output shapes + are the same. Notice that only one of 'keys_*'/'values_*' can be set. + + For key look-up, bit-wise comparison is used so even a float NaN can be + mapped to a value in 'values_*' attribute. + + + + Args: + X: Input data. It can be either a tensor or a scalar.
+ + default_float: A float. + + default_int64: An integer. + + default_string: A string. + + keys_floats: A list of floats. + + keys_int64s: A list of ints. + + keys_strings: A list of strings. One and only one of 'keys_*'s should be + set. + + values_floats: A list of floats. + + values_int64s: A list of ints. + + values_strings: A list of strings. One and only one of 'value_*'s should be + set. + """ + + schema = get_schema("LabelEncoder", 2, "ai.onnx.ml") + op: Callable[..., Union[FLOAT, INT64, STRING]] = Op(self, "LabelEncoder", schema) + return op( + *self._prepare_inputs(schema, X), + default_float=default_float, + default_int64=default_int64, + default_string=default_string, + keys_floats=keys_floats, + keys_int64s=keys_int64s, + keys_strings=keys_strings, + values_floats=values_floats, + values_int64s=values_int64s, + values_strings=values_strings, + ) diff --git a/onnxscript/onnx_opset/_impl/opset_ai_onnx_ml3.py b/onnxscript/onnx_opset/_impl/opset_ai_onnx_ml3.py new file mode 100644 index 0000000000..0b23e39b4c --- /dev/null +++ b/onnxscript/onnx_opset/_impl/opset_ai_onnx_ml3.py @@ -0,0 +1,299 @@ +# -------------------------------------------------------------------------- +# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ +# ⚙️ Generated by 'python -m opgen' +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +# flake8: noqa +# mypy: disable-error-code=override +# pylint: disable=W0221,W0222,W0237,W0246,R0901 +# -------------------------------------------------------------------------- + +from typing import Callable, Optional, Sequence, Tuple, Union + +from onnx import TensorProto +from onnx.defs import get_schema + +from onnxscript.onnx_opset._impl.opset_ai_onnx_ml2 import Opset_ai_onnx_ml2 +from onnxscript.onnx_types import DOUBLE, FLOAT, INT32, INT64, STRING +from onnxscript.values import Op, Opset + + +class Opset_ai_onnx_ml3(Opset_ai_onnx_ml2): + def __new__(cls): + return Opset.__new__(cls, "ai.onnx.ml", 3) + + def __init__(self): + super().__init__() + + def TreeEnsembleClassifier( + self, + X: Union[DOUBLE, FLOAT, INT32, INT64], + base_values: Optional[Sequence[float]] = None, + base_values_as_tensor: Optional[TensorProto] = None, + class_ids: Optional[Sequence[int]] = None, + class_nodeids: Optional[Sequence[int]] = None, + class_treeids: Optional[Sequence[int]] = None, + class_weights: Optional[Sequence[float]] = None, + class_weights_as_tensor: Optional[TensorProto] = None, + classlabels_int64s: Optional[Sequence[int]] = None, + classlabels_strings: Optional[Sequence[str]] = None, + nodes_falsenodeids: Optional[Sequence[int]] = None, + nodes_featureids: Optional[Sequence[int]] = None, + nodes_hitrates: Optional[Sequence[float]] = None, + nodes_hitrates_as_tensor: Optional[TensorProto] = None, + nodes_missing_value_tracks_true: Optional[Sequence[int]] = None, + nodes_modes: Optional[Sequence[str]] = None, + nodes_nodeids: Optional[Sequence[int]] = None, + nodes_treeids: Optional[Sequence[int]] = None, + nodes_truenodeids: Optional[Sequence[int]] = None, + nodes_values: Optional[Sequence[float]] = None, + nodes_values_as_tensor: Optional[TensorProto] = None, + post_transform: str = "NONE", + ) -> Tuple[Union[INT64, STRING], FLOAT]: + r"""[🌐 
ai.onnx.ml::TreeEnsembleClassifier(3)](https://onnx.ai/onnx/operators/onnx_aionnxml_TreeEnsembleClassifier.html#treeensembleclassifier-3 "Online Documentation") + + + Tree Ensemble classifier. Returns the top class for each of N inputs. + + The attributes named 'nodes_X' form a sequence of tuples, associated by + index into the sequences, which must all be of equal length. These tuples + define the nodes. + + Similarly, all fields prefixed with 'class_' are tuples of votes at the leaves. + A leaf may have multiple votes, where each vote is weighted by + the associated class_weights index. + + One and only one of classlabels_strings or classlabels_int64s + will be defined. The class_ids are indices into this list. + All fields ending with _as_tensor can be used instead of the + same parameter without the suffix if the element type is double and not float. + + + Args: + X: Input of shape [N,F] + + base_values: Base values for classification, added to final class score; the + size must be the same as the classes or can be left unassigned (assumed + 0) + + base_values_as_tensor: Base values for classification, added to final class + score; the size must be the same as the classes or can be left + unassigned (assumed 0) + + class_ids: The index of the class list that each weight is for. + + class_nodeids: node id that this weight is for. + + class_treeids: The id of the tree that this node is in. + + class_weights: The weight for the class in class_id. + + class_weights_as_tensor: The weight for the class in class_id. + + classlabels_int64s: Class labels if using integer labels.
One and only + one of the 'classlabels_*' attributes must be defined. + + classlabels_strings: Class labels if using string labels.
One and only + one of the 'classlabels_*' attributes must be defined. + + nodes_falsenodeids: Child node if expression is false. + + nodes_featureids: Feature id for each node. + + nodes_hitrates: Popularity of each node, used for performance and may be + omitted. + + nodes_hitrates_as_tensor: Popularity of each node, used for performance and + may be omitted. + + nodes_missing_value_tracks_true: For each node, define what to do in the + presence of a missing value: if a value is missing (NaN), use the 'true' + or 'false' branch based on the value in this array.
This attribute + may be left undefined, and the default value is false (0) for all nodes. + + nodes_modes: The node kind, that is, the comparison to make at the node. + There is no comparison to make at a leaf node.
One of 'BRANCH_LEQ', + 'BRANCH_LT', 'BRANCH_GTE', 'BRANCH_GT', 'BRANCH_EQ', 'BRANCH_NEQ', + 'LEAF' + + nodes_nodeids: Node id for each node. Ids may restart at zero for each tree, + but it is not required to. + + nodes_treeids: Tree id for each node. + + nodes_truenodeids: Child node if expression is true. + + nodes_values: Thresholds to do the splitting on for each node. + + nodes_values_as_tensor: Thresholds to do the splitting on for each node. + + post_transform: Indicates the transform to apply to the score.
One of + 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT.' + """ + + schema = get_schema("TreeEnsembleClassifier", 3, "ai.onnx.ml") + op: Callable[..., Tuple[Union[INT64, STRING], FLOAT]] = Op( + self, "TreeEnsembleClassifier", schema + ) + return op( + *self._prepare_inputs(schema, X), + base_values=base_values, + base_values_as_tensor=base_values_as_tensor, + class_ids=class_ids, + class_nodeids=class_nodeids, + class_treeids=class_treeids, + class_weights=class_weights, + class_weights_as_tensor=class_weights_as_tensor, + classlabels_int64s=classlabels_int64s, + classlabels_strings=classlabels_strings, + nodes_falsenodeids=nodes_falsenodeids, + nodes_featureids=nodes_featureids, + nodes_hitrates=nodes_hitrates, + nodes_hitrates_as_tensor=nodes_hitrates_as_tensor, + nodes_missing_value_tracks_true=nodes_missing_value_tracks_true, + nodes_modes=nodes_modes, + nodes_nodeids=nodes_nodeids, + nodes_treeids=nodes_treeids, + nodes_truenodeids=nodes_truenodeids, + nodes_values=nodes_values, + nodes_values_as_tensor=nodes_values_as_tensor, + post_transform=post_transform, + ) + + def TreeEnsembleRegressor( + self, + X: Union[DOUBLE, FLOAT, INT32, INT64], + aggregate_function: str = "SUM", + base_values: Optional[Sequence[float]] = None, + base_values_as_tensor: Optional[TensorProto] = None, + n_targets: Optional[int] = None, + nodes_falsenodeids: Optional[Sequence[int]] = None, + nodes_featureids: Optional[Sequence[int]] = None, + nodes_hitrates: Optional[Sequence[float]] = None, + nodes_hitrates_as_tensor: Optional[TensorProto] = None, + nodes_missing_value_tracks_true: Optional[Sequence[int]] = None, + nodes_modes: Optional[Sequence[str]] = None, + nodes_nodeids: Optional[Sequence[int]] = None, + nodes_treeids: Optional[Sequence[int]] = None, + nodes_truenodeids: Optional[Sequence[int]] = None, + nodes_values: Optional[Sequence[float]] = None, + nodes_values_as_tensor: Optional[TensorProto] = None, + post_transform: str = "NONE", + target_ids: Optional[Sequence[int]] = None, + target_nodeids: Optional[Sequence[int]] = None, + target_treeids: Optional[Sequence[int]] = None, + target_weights: Optional[Sequence[float]] = None, + target_weights_as_tensor: Optional[TensorProto] = None, + ) -> FLOAT: + r"""[🌐 ai.onnx.ml::TreeEnsembleRegressor(3)](https://onnx.ai/onnx/operators/onnx_aionnxml_TreeEnsembleRegressor.html#treeensembleregressor-3 "Online Documentation") + + + Tree Ensemble regressor. Returns the regressed values for each input in N. + + All args with nodes_ are fields of a tuple of tree nodes, and + it is assumed they are the same length, and an index i will decode the + tuple across these inputs. Each node id can appear only once + for each tree id. + + All fields prefixed with target_ are tuples of votes at the leaves. + + A leaf may have multiple votes, where each vote is weighted by + the associated target_weights index. + + All fields ending with _as_tensor can be used instead of the + same parameter without the suffix if the element type is double and not float. + All trees must have their node ids start at 0 and increment by 1. + + Mode enum is BRANCH_LEQ, BRANCH_LT, BRANCH_GTE, BRANCH_GT, BRANCH_EQ, BRANCH_NEQ, LEAF + + + Args: + X: Input of shape [N,F] + + aggregate_function: Defines how to aggregate leaf values within a target. +
One of 'AVERAGE,' 'SUM,' 'MIN,' 'MAX.' + + base_values: Base values for classification, added to final class score; the + size must be the same as the classes or can be left unassigned (assumed + 0) + + base_values_as_tensor: Base values for classification, added to final class + score; the size must be the same as the classes or can be left + unassigned (assumed 0) + + n_targets: The total number of targets. + + nodes_falsenodeids: Child node if expression is false + + nodes_featureids: Feature id for each node. + + nodes_hitrates: Popularity of each node, used for performance and may be + omitted. + + nodes_hitrates_as_tensor: Popularity of each node, used for performance and + may be omitted. + + nodes_missing_value_tracks_true: For each node, define what to do in the + presence of a NaN: use the 'true' (if the attribute value is 1) or + 'false' (if the attribute value is 0) branch based on the value in this + array.
This attribute may be left undefined and the default value is + false (0) for all nodes. + + nodes_modes: The node kind, that is, the comparison to make at the node. + There is no comparison to make at a leaf node.
One of 'BRANCH_LEQ', + 'BRANCH_LT', 'BRANCH_GTE', 'BRANCH_GT', 'BRANCH_EQ', 'BRANCH_NEQ', + 'LEAF' + + nodes_nodeids: Node id for each node. Node ids must restart at zero for each + tree and increase sequentially. + + nodes_treeids: Tree id for each node. + + nodes_truenodeids: Child node if expression is true + + nodes_values: Thresholds to do the splitting on for each node. + + nodes_values_as_tensor: Thresholds to do the splitting on for each node. + + post_transform: Indicates the transform to apply to the score.
One of + 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT' + + target_ids: The index of the target that each weight is for + + target_nodeids: The node id of each weight + + target_treeids: The id of the tree that each node is in. + + target_weights: The weight for each target + + target_weights_as_tensor: The weight for each target + """ + + schema = get_schema("TreeEnsembleRegressor", 3, "ai.onnx.ml") + op: Callable[..., FLOAT] = Op(self, "TreeEnsembleRegressor", schema) + return op( + *self._prepare_inputs(schema, X), + aggregate_function=aggregate_function, + base_values=base_values, + base_values_as_tensor=base_values_as_tensor, + n_targets=n_targets, + nodes_falsenodeids=nodes_falsenodeids, + nodes_featureids=nodes_featureids, + nodes_hitrates=nodes_hitrates, + nodes_hitrates_as_tensor=nodes_hitrates_as_tensor, + nodes_missing_value_tracks_true=nodes_missing_value_tracks_true, + nodes_modes=nodes_modes, + nodes_nodeids=nodes_nodeids, + nodes_treeids=nodes_treeids, + nodes_truenodeids=nodes_truenodeids, + nodes_values=nodes_values, + nodes_values_as_tensor=nodes_values_as_tensor, + post_transform=post_transform, + target_ids=target_ids, + target_nodeids=target_nodeids, + target_treeids=target_treeids, + target_weights=target_weights, + target_weights_as_tensor=target_weights_as_tensor, + ) diff --git a/onnxscript/onnx_opset/_impl/opset_ai_onnx_preview_training1.py b/onnxscript/onnx_opset/_impl/opset_ai_onnx_preview_training1.py new file mode 100644 index 0000000000..63e03e22b0 --- /dev/null +++ b/onnxscript/onnx_opset/_impl/opset_ai_onnx_preview_training1.py @@ -0,0 +1,553 @@ +# -------------------------------------------------------------------------- +# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ +# ⚙️ Generated by 'python -m opgen' +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +# flake8: noqa +# mypy: disable-error-code=override +# pylint: disable=W0221,W0222,W0237,W0246,R0901 +# -------------------------------------------------------------------------- + +from typing import Callable, Optional, Sequence, Union + +from onnx.defs import get_schema + +from onnxscript.onnx_types import ( + BOOL, + COMPLEX64, + COMPLEX128, + DOUBLE, + FLOAT, + FLOAT16, + INT8, + INT16, + INT32, + INT64, + STRING, + UINT8, + UINT16, + UINT32, + UINT64, +) +from onnxscript.values import Op, Opset + + +class Opset_ai_onnx_preview_training1(Opset): + def __new__(cls): + return Opset.__new__(cls, "ai.onnx.preview.training", 1) + + def __init__(self): + super().__init__() + + def Adagrad( + self, + R: Union[DOUBLE, FLOAT], + T: INT64, + *inputs: Union[DOUBLE, FLOAT], + decay_factor: float = 0.0, + epsilon: float = 9.999999974752427e-07, + norm_coefficient: float = 0.0, + ) -> Union[DOUBLE, FLOAT]: + r"""[🌐 ai.onnx.preview.training::Adagrad(1)](https://onnx.ai/onnx/operators/onnx_aionnxpreviewtraining_Adagrad.html#adagrad-1 "Online Documentation") + + + Compute one iteration of ADAGRAD, a stochastic gradient based optimization + algorithm. This operator can conduct the optimization of multiple tensor variables. + + Let's define the behavior of this operator. As you can imagine, ADAGRAD requires + some parameters: + + - The initial learning-rate "R". + - The update count "T". That is, the number of training iterations conducted. + - A L2-norm regularization coefficient "norm_coefficient". 
+ - A learning-rate decay factor "decay_factor". + - A small constant "epsilon" to avoid dividing-by-zero. + + At each ADAGRAD iteration, the optimized tensors are moved along a direction + computed based on their estimated gradient and accumulated squared gradient. Assume + that only a single tensor "X" is updated by this operator. We need the value of "X", + its gradient "G", and its accumulated squared gradient "H". Therefore, variables in + this operator's input list are sequentially "R", "T", "X", "G", and "H". Other + parameters are given as attributes because they are usually constants. Also, the + corresponding output tensors are the new value of "X" (called "X_new"), and then + the new accumulated squared gradient (called "H_new"). Those outputs are computed + from the given inputs following the pseudo code below. + + Let "+", "-", "*", and "/" be element-wise arithmetic operations with + numpy-style broadcasting support. The pseudo code to compute those outputs is: + + // Compute a scalar learning-rate factor. At the first update of X, T is generally + // 0 (0-based update index) or 1 (1-based update index). + r = R / (1 + T * decay_factor); + + // Add gradient of 0.5 * norm_coefficient * ||X||_2^2, where ||X||_2 is the 2-norm. + G_regularized = norm_coefficient * X + G; + + // Compute new accumulated squared gradient. + H_new = H + G_regularized * G_regularized; + + // Compute the adaptive part of per-coordinate learning rate. Note that Sqrt(...) + // computes element-wise square-root. + H_adaptive = Sqrt(H_new) + epsilon + + // Compute the new value of "X". + X_new = X - r * G_regularized / H_adaptive; + + If one assigns this operator to optimize multiple inputs, for example, "X_1" and "X_2", the same + pseudo code may be extended to handle all tensors jointly. More specifically, we can view "X" as a + concatenation of "X_1" and "X_2" (of course, their gradients and accumulated squared gradients should + be concatenated too) and then just reuse the entire pseudo code. + + Note that ADAGRAD was first proposed in http://jmlr.org/papers/volume12/duchi11a/duchi11a.pdf. + In that reference paper, this operator is a special case of the Figure 1's composite mirror + descent update. + + + Args: + R: The initial learning rate. + + T: The update count of "X". It should be a scalar. + + inputs: (variadic, heterogeneous) The current values of optimized tensors, + followed by their respective gradients, followed by their respective + accumulated squared gradients. For example, if two tensors "X_1" and "X_2" + are optimized, the input list would be ["X_1", "X_2", gradient of "X_1", + gradient of "X_2", accumulated squared gradient of "X_1", accumulated + squared gradient of "X_2"]. + + decay_factor: The decay factor of learning rate after one update. The + effective learning rate is computed by r = R / (1 + T * decay_factor). + Default to 0 so that increasing update counts doesn't reduce the + learning rate. + + epsilon: Small scalar to avoid dividing by zero. + + norm_coefficient: Regularization coefficient in 0.5 * norm_coefficient * + ||X||_2^2. Default to 0, which means no regularization.
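+
+ Example: a minimal NumPy sketch of the update rule above (an editor's
+ illustration, not part of the ONNX schema text; the names mirror the
+ pseudo code and the sample values are arbitrary):
+
+ import numpy as np
+
+ R, T = 0.1, 0 # learning rate and update count
+ X = np.array([1.0, 2.0]) # tensor being optimized
+ G = np.array([0.5, -0.5]) # gradient of X
+ H = np.zeros_like(X) # accumulated squared gradient
+
+ r = R / (1 + T * 0.0) # decay_factor = 0
+ G_regularized = 0.0 * X + G # norm_coefficient = 0
+ H_new = H + G_regularized * G_regularized
+ X_new = X - r * G_regularized / (np.sqrt(H_new) + 1e-6) # epsilon = 1e-6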
+ """ + + schema = get_schema("Adagrad", 1, "ai.onnx.preview.training") + op: Callable[..., Union[DOUBLE, FLOAT]] = Op(self, "Adagrad", schema) + return op( + *self._prepare_inputs(schema, R, T, *inputs), + decay_factor=decay_factor, + epsilon=epsilon, + norm_coefficient=norm_coefficient, + ) + + def Adam( + self, + R: Union[DOUBLE, FLOAT], + T: INT64, + *inputs: Union[DOUBLE, FLOAT], + alpha: float = 0.8999999761581421, + beta: float = 0.9990000128746033, + epsilon: float = 9.999999974752427e-07, + norm_coefficient: float = 0.0, + norm_coefficient_post: float = 0.0, + ) -> Union[DOUBLE, FLOAT]: + r"""[🌐 ai.onnx.preview.training::Adam(1)](https://onnx.ai/onnx/operators/onnx_aionnxpreviewtraining_Adam.html#adam-1 "Online Documentation") + + + Compute one iteration of Adam, a stochastic gradient based optimization + algorithm. This operator can conduct the optimization of multiple tensor variables. + + Let's define the behavior of this operator. First of all, Adam requires + some parameters: + + - The learning-rate "R". + - The update count "T". That is, the number of training iterations conducted. + - A L2-norm regularization coefficient "norm_coefficient". + - A small constant "epsilon" to avoid dividing-by-zero. + - Two coefficients, "alpha" and "beta". + + At each Adam iteration, the optimized tensors are moved along a direction + computed based on their exponentially-averaged historical gradient and + exponentially-averaged historical squared gradient. Assume that only a tensor + "X" is being optimized. The rest of required information is + + - the value of "X", + - "X"'s gradient (denoted by "G"), + - "X"'s exponentially-averaged historical gradient (denoted by "V"), and + - "X"'s exponentially-averaged historical squared gradient (denoted by "H"). + + Some of those parameters are passed into this operator as input tensors and others + are stored as this operator's attributes. Specifically, this operator's input tensor + list is ["R", "T", "X", "G", "V", "H"]. That is, "R" is the first input, "T" is + the second input, and so on. Other parameters are given as attributes because they + are constants. Moreover, the corresponding output tensors are + + - the new value of "X" (called "X_new"), + - the new exponentially-averaged historical gradient (denoted by "V_new"), and + - the new exponentially-averaged historical squared gradient (denoted by "H_new"). + + Those outputs are computed following the pseudo code below. + + Let "+", "-", "*", and "/" are all element-wise arithmetic operations with + numpy-style broadcasting support. The pseudo code to compute those outputs is: + + // Add gradient of 0.5 * norm_coefficient * ||X||_2^2, where ||X||_2 is the 2-norm. + G_regularized = norm_coefficient * X + G + + // Update exponentially-averaged historical gradient. + V_new = alpha * V + (1 - alpha) * G_regularized + + // Update exponentially-averaged historical squared gradient. + H_new = beta * H + (1 - beta) * G_regularized * G_regularized + + // Compute the element-wise square-root of H_new. V_new will be element-wisely + // divided by H_sqrt for a better update direction. + H_sqrt = Sqrt(H_new) + epsilon + + // Compute learning-rate. Note that "alpha**T"/"beta**T" is alpha's/beta's T-th power. + R_adjusted = T > 0 ? R * Sqrt(1 - beta**T) / (1 - alpha**T) : R + + // Compute new value of "X". + X_new = X - R_adjusted * V_new / H_sqrt + + // Post-update regularization. 
+ X_final = (1 - norm_coefficient_post) * X_new + + If there are multiple inputs to be optimized, the pseudo code will be applied + independently to each of them. + + + Args: + R: The initial learning rate. + + T: The update count of "X". It should be a scalar. + + inputs: (variadic, heterogeneous) The tensors to be optimized, followed by + their respective gradients, followed by their respective accumulated + gradients (aka momentum), followed by their respective accumulated + squared gradients. For example, to optimize tensors "X_1" and "X_2,", + the input list would be ["X_1", "X_2", gradient of "X_1", gradient of + "X_2", accumulated gradient of "X_1", accumulated gradient of "X_2", + accumulated squared gradient of "X_1", accumulated squared gradient of + "X_2"]. + + alpha: Coefficient of previously accumulated gradient in running average. + Default to 0.9. + + beta: Coefficient of previously accumulated squared-gradient in running + average. Default to 0.999. + + epsilon: Small scalar to avoid dividing by zero. + + norm_coefficient: Regularization coefficient of 0.5 * norm_coefficient * + ||X||_2^2. Default to 0, which means no regularization. + + norm_coefficient_post: Regularization coefficient of 0.5 * norm_coefficient + * ||X||_2^2. Default to 0, which means no regularization. + """ + + schema = get_schema("Adam", 1, "ai.onnx.preview.training") + op: Callable[..., Union[DOUBLE, FLOAT]] = Op(self, "Adam", schema) + return op( + *self._prepare_inputs(schema, R, T, *inputs), + alpha=alpha, + beta=beta, + epsilon=epsilon, + norm_coefficient=norm_coefficient, + norm_coefficient_post=norm_coefficient_post, + ) + + def Gradient( + self, + *Inputs: Union[ + BOOL, + COMPLEX128, + COMPLEX64, + DOUBLE, + FLOAT, + FLOAT16, + INT16, + INT32, + INT64, + INT8, + STRING, + UINT16, + UINT32, + UINT64, + UINT8, + ], + xs: Optional[Sequence[str]] = None, + y: Optional[str] = None, + zs: Optional[Sequence[str]] = None, + ) -> Union[DOUBLE, FLOAT, FLOAT16]: + r"""[🌐 ai.onnx.preview.training::Gradient(1)](https://onnx.ai/onnx/operators/onnx_aionnxpreviewtraining_Gradient.html#gradient-1 "Online Documentation") + + + Gradient operator computes the partial derivatives of a specific tensor w.r.t. + some other tensors. This operator is widely used in gradient-based training + algorithms. To illustrate its use, let's consider a computation graph, + + :: + + X -----. + | + v + W --> Conv --> H --> Gemm --> Y + ^ + | + Z + + + + , where W and Z are trainable tensors. Note that operators' attributes are + omitted for the sake of simplicity. Let dY/dW (dY/dZ) be the gradient of + Y with respect to W (Z). The user can compute gradient by inserting Gradient + operator to form another graph shown below. + + :: + + W --> Conv --> H --> Gemm --> Y + | ^ ^ + | | | + | X Z + | | | + | | .----------' + | | | (W/Z/X is the 1st/2nd/3rd input of Gradient as shown in + | | | "xs" followed by "zs") + | v v + '---> Gradient(xs=["W", "Z"], zs=["X"], y="Y") + | | + | '-----------------------------------> dY/dW (1st output of Gradient) + | + '---------------------------------------> dY/dZ (2nd output of Gradient) + + + + By definition, the tensor "y" is a function of independent variables in "xs" + and "zs". Since we only compute the gradient of "y" w.r.t. the differentiable + variables in "xs", this Gradient only outputs dY/dW and dY/dZ. Note that "H" + cannot appear in "xs" and "zs". The reason is that "H" can be determined by + tensors "W" and "X" and therefore "H" is not an independent variable. 
+ + All outputs are optional. If needed, for example, user can assign an empty + string to the 1st output name of that Gradient to skip the generation of dY/dW. + Note that the concept of optional outputs can also be found in ONNX's RNN, GRU, + and LSTM. + + Gradient operator can compute derivative against intermediate tensors. For + example, the gradient of Y with respect to H can be done via + + :: + + W --> Conv --> H --> Gemm --> Y + ^ | ^ + | | | + X | Z + .-------' | + | .----------' + | | (H/Z is the 1st/2nd input of Gradient as shown in "xs") + v v + Gradient(xs=["H", "Z"], y="Y") + | | + | '-----------------------------------> dY/dH (1st output of Gradient) + | + '---------------------------------------> dY/dZ (2nd output of Gradient) + + + + It is possible to represent high-order differentiation using Gradient operators. + For example, given the following linear model: + + :: + + W --> Gemm --> Y --> Loss --> O + ^ ^ + | | + X L + + + + To compute the 2nd order derivative of O with respect to W (denoted by + d^2O/dW^2), one can do + + :: + + W --> Gemm --> Y --> Loss --> O + | ^ ^ + | | | + | X .------------L + | | | | + | | | v + +------+-+> Gradient(xs=["X", "W"], zs=["L"], y="O") ---> dO/dX (1st output of Gradient) + | | | | + | | | '---> dO/dW (2nd output of Gradient) + | v v + '---> Gradient(xs=["X", "W"], zs=["L"], y="dO/dW") ---> d(dO/dW)dX (1st output of + | Gradient) + | + | + '---> d^2O/dW^2 (2nd output of Gradient) + + + + The tensors named in attributes "xs", "zs", and "y" define the differentiated + computation graph, and the inputs to Gradient node define the values at + which the gradient is computed. We can feed different tensors to the identified + graph. For example, one can compute the gradient of Y with respect to H at + a specific value of H, H_1, by providing that value as an input to the Gradient + node. + + :: + + W --> Conv --> H --> Gemm --> Y + ^ ^ + | | + X Z + + Z_1 (2nd input of Gradient) + | + v + H_1 --> Gradient(xs=["H", "Z"], y="Y") ---> dY/dH when H = H_1 and Y = Y_1. + | + '------------------------------> dY/dZ (2nd output of Gradient) + + + + When the inputs of Gradient are the tensors named in "xs" and "zs", the + computation can be optimized. More specifically, intermediate variables in + forward pass can be reused if the gradient is computed via reverse-mode + auto-differentiation. + + + + Args: + Inputs: (variadic, heterogeneous) The values fed into graph identified by + the attributes. The i-th input is the value of the i-th tensor specified + in the concatenated list of the attribute "xs" and the attribute "zs". + For example, if xs=["A", "B"] and zs=["C"], the first input is used as + the value of symbol "A" and the 3rd input is substituted for all the + occurrences of "C". + + xs: Input tensor names of the differentiated sub-graph. It contains only the + necessary differentiated inputs of a (sub-)graph. Variables (usually + called intermediate variables) that can be generated from inputs cannot + be included in this attribute. + + y: The targeted tensor. It can be viewed as the output of the differentiated + function. The attribute "xs" and attribute "zs" are the minimal + independent variable set that determines the value of "y". + + zs: Input tensor names of the differentiated sub-graph. It contains only the + necessary non-differentiated inputs of a (sub-)graph. Variables (usually + called intermediate variables) that can be generated from inputs cannot + be included in this attribute. 
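+
+ Example: an editor's illustration (not part of the ONNX schema text) of
+ the input/output ordering for the first diagram above, where the node is
+ Gradient(xs=["W", "Z"], zs=["X"], y="Y"):
+
+ inputs = [W_value, Z_value, X_value] # ordered as "xs" then "zs"
+ outputs = [dY_dW, dY_dZ] # one gradient per entry in "xs"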
+ """ + + schema = get_schema("Gradient", 1, "ai.onnx.preview.training") + op: Callable[..., Union[DOUBLE, FLOAT, FLOAT16]] = Op(self, "Gradient", schema) + return op(*self._prepare_inputs(schema, *Inputs), xs=xs, y=y, zs=zs) + + def Momentum( + self, + R: Union[DOUBLE, FLOAT], + T: INT64, + *inputs: Union[DOUBLE, FLOAT], + alpha: Optional[float] = None, + beta: Optional[float] = None, + mode: Optional[str] = None, + norm_coefficient: Optional[float] = None, + ) -> Union[DOUBLE, FLOAT]: + r"""[🌐 ai.onnx.preview.training::Momentum(1)](https://onnx.ai/onnx/operators/onnx_aionnxpreviewtraining_Momentum.html#momentum-1 "Online Documentation") + + + Compute one iteration of stochastic gradient update with momentum. + This operator can conduct the optimization of multiple tensor variables. + + Let's define the behavior of this operator. As you can imagine, SG with momentum requires + several parameters: + + - The learning-rate "R". + - The update count "T". That is, the number of conducted training iterations. It should + be zero in the first training iteration. + - A L2-norm regularization coefficient "norm_coefficient". + - A decay coefficient of previous accumulated gradient (i.e., momentum) "alpha". + - The scaling coefficient of current gradient "beta". + - An attribute to choose either standard momentum or Nesterov's momentum "mode" should + be used. + + For the sake of simplicity, assume that there is only one tensor (called "X") to be optimized. + Other necessary inputs are "X"'s gradient (called "G") and "X"'s momentum (called "V"). This + Momentum operator maps all these inputs to the new value of "X" (called "X_new") and its new + momentum (called "V_new"). + + This operator supports two different momentum algorithms. Set the attribute "mode" to + "nesterov" if Nesterov's momentum is desired. Otherwise, set the attribute "model" to + "standard" to use standard momentum. Computation details are described subsequently. + + Let "+", "-", "*", and "/" are all element-wise operations with numpy-style broadcasting. + + Pseudo code for SG with standard momentum: + + // Add gradient of 0.5 * norm_coefficient * ||X||^2, where ||X|| is the sum of squared + // values of all elements in X. + G_regularized = norm_coefficient * X + G + + // In the first training iteration, beta should always be 1. + beta_adjusted = T > 0 ? beta : 1 + + // Compute the current momentum based on previous momentum and the current gradient. + V_new = alpha * V + beta_adjusted * G_regularized + + // Update X. + X_new = X - R * V_new + + Pseudo code for SG with Nesterov's momentum: + + // Add gradient of 0.5 * norm_coefficient * ||X||^2, where ||X|| is the sum of squared + // values of all elements in X. + G_regularized = norm_coefficient * X + G; + + // In the first training iteration, beta should always be 1. + beta_adjusted = T > 0 ? beta : 1 + + // Compute the current momentum based on previous momentum and the current gradient. + V_new = alpha * V + beta_adjusted * G_regularized; + + // Compute final update direction and then update X. + X_new = X - R * (G_regularized + alpha * V_new) + + If one assign this operators to optimize multiple inputs, for example, "X_1" and "X_2". The same + pseudo code would be extended to handle all tensors jointly. More specifically, we can view "X" as a + concatenation of "X_1" and "X_2" (of course, their gradient and accumulate gradient should + be concatenated too) and then our pseudo code becomes applicable. + + + Args: + R: The learning rate. + + T: Update count of "X". 
It should be a scalar. + + inputs: (variadic, heterogeneous) It sequentially contains the current + values of optimized tensors, then their gradient tensors, and finally + their momentum tensors. For example, if two tensors "X_1" and "X_2" are + optimized, The expected input list would be ["X_1", "X_2", gradient of + "X_1", gradient of "X_2", momentum of "X_1", momentum of "X_2"]. + + alpha: The decay factor of momentum. It should be a scalar. + + beta: The coefficient of gradient in computing new momentum. It should be a + scalar. + + mode: Its value should be either "nesterov" or "standard". The value + "nesterov" leads to the use of Nesterov's momentum while "standard" + invokes stochastic gradient method using standard momentum + + norm_coefficient: Coefficient of 0.5 * norm_coefficient * ||X||^2. + """ + + schema = get_schema("Momentum", 1, "ai.onnx.preview.training") + op: Callable[..., Union[DOUBLE, FLOAT]] = Op(self, "Momentum", schema) + return op( + *self._prepare_inputs(schema, R, T, *inputs), + alpha=alpha, + beta=beta, + mode=mode, + norm_coefficient=norm_coefficient, + ) diff --git a/onnxscript/onnx_types.py b/onnxscript/onnx_types.py index 2c40a6997e..8c8db3b17b 100644 --- a/onnxscript/onnx_types.py +++ b/onnxscript/onnx_types.py @@ -39,6 +39,8 @@ def check_shape(shape): class TensorType: """ONNX Script representation of a tensor type.""" + default_instance: Optional["TensorType"] = None + def __init__(self, dtype, shape: Optional[ShapeType] = None) -> None: self.dtype = dtype self.shape = shape @@ -53,6 +55,13 @@ def __getitem__(self, shape: Optional[ShapeType]): shape = (None,) return TensorType(self.dtype, shape) + def __class_getitem__(cls, shape: Optional[ShapeType]): + if cls.default_instance is None: + raise TypeError(f"{cls} does not specify a default_instance.") + # pylint erroneously flags with unsubscriptable-object if + # using subscript notation (cls.default_instance[shape]): + return cls.default_instance.__getitem__(shape) + def to_type_proto(self) -> onnx.TypeProto: if self.shape is None: shape = () # "FLOAT" is treated as a scalar @@ -65,22 +74,94 @@ def to_type_proto(self) -> onnx.TypeProto: return onnx.helper.make_tensor_type_proto(self.dtype, shape) -FLOAT = TensorType(onnx.TensorProto.FLOAT) -UINT8 = TensorType(onnx.TensorProto.UINT8) -INT8 = TensorType(onnx.TensorProto.INT8) -UINT16 = TensorType(onnx.TensorProto.UINT16) -INT16 = TensorType(onnx.TensorProto.INT16) -INT32 = TensorType(onnx.TensorProto.INT32) -INT64 = TensorType(onnx.TensorProto.INT64) -STRING = TensorType(onnx.TensorProto.STRING) -BOOL = TensorType(onnx.TensorProto.BOOL) -FLOAT16 = TensorType(onnx.TensorProto.FLOAT16) -DOUBLE = TensorType(onnx.TensorProto.DOUBLE) -UINT32 = TensorType(onnx.TensorProto.UINT32) -UINT64 = TensorType(onnx.TensorProto.UINT64) -COMPLEX64 = TensorType(onnx.TensorProto.COMPLEX64) -COMPLEX128 = TensorType(onnx.TensorProto.COMPLEX128) -BFLOAT16 = TensorType(onnx.TensorProto.BFLOAT16) +class _BuiltinTensorType: + def __init__(self, tensor_proto: onnx.TensorProto): + self.tensor_proto = tensor_proto + + def __call__(self, cls): + cls.default_instance = TensorType(self.tensor_proto) + cls.to_type_proto = cls.default_instance.to_type_proto + return cls + + +@_BuiltinTensorType(onnx.TensorProto.FLOAT) +class FLOAT(TensorType): + pass + + +@_BuiltinTensorType(onnx.TensorProto.UINT8) +class UINT8(TensorType): + pass + + +@_BuiltinTensorType(onnx.TensorProto.INT8) +class INT8(TensorType): + pass + + +@_BuiltinTensorType(onnx.TensorProto.UINT16) +class UINT16(TensorType): + 
pass + + +@_BuiltinTensorType(onnx.TensorProto.INT16) +class INT16(TensorType): + pass + + +@_BuiltinTensorType(onnx.TensorProto.INT32) +class INT32(TensorType): + pass + + +@_BuiltinTensorType(onnx.TensorProto.INT64) +class INT64(TensorType): + pass + + +@_BuiltinTensorType(onnx.TensorProto.STRING) +class STRING(TensorType): + pass + + +@_BuiltinTensorType(onnx.TensorProto.BOOL) +class BOOL(TensorType): + pass + + +@_BuiltinTensorType(onnx.TensorProto.FLOAT16) +class FLOAT16(TensorType): + pass + + +@_BuiltinTensorType(onnx.TensorProto.DOUBLE) +class DOUBLE(TensorType): + pass + + +@_BuiltinTensorType(onnx.TensorProto.UINT32) +class UINT32(TensorType): + pass + + +@_BuiltinTensorType(onnx.TensorProto.UINT64) +class UINT64(TensorType): + pass + + +@_BuiltinTensorType(onnx.TensorProto.COMPLEX64) +class COMPLEX64(TensorType): + pass + + +@_BuiltinTensorType(onnx.TensorProto.COMPLEX128) +class COMPLEX128(TensorType): + pass + + +@_BuiltinTensorType(onnx.TensorProto.BFLOAT16) +class BFLOAT16(TensorType): + pass def onnx_type_to_onnxscript_repr(onnx_type: onnx.TypeProto) -> str: diff --git a/onnxscript/tensor.py b/onnxscript/tensor.py index 38bd55af1b..c3aea337a2 100644 --- a/onnxscript/tensor.py +++ b/onnxscript/tensor.py @@ -2,8 +2,11 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. # -------------------------------------------------------------------------- + from __future__ import annotations +from typing import Any + import numpy as np from onnx import TensorProto from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE @@ -21,7 +24,7 @@ def __init__(self, nparray, opset=None): raise TypeError(f"Unexpected type {type(nparray)}. It must be a numpy array.") self._nparray = nparray - self._opset = opset or onnx_opset.default_opset + self._opset: Any = opset or onnx_opset.default_opset @property def value(self): diff --git a/onnxscript/values.py b/onnxscript/values.py index ed919c99df..6aa80fa156 100644 --- a/onnxscript/values.py +++ b/onnxscript/values.py @@ -8,10 +8,11 @@ import logging import types from enum import IntFlag -from typing import Any, _GenericAlias # type: ignore[attr-defined] +from typing import Any, Optional, _GenericAlias # type: ignore[attr-defined] import numpy as np import onnx +from onnx.defs import OpSchema from onnxscript import debuginfo, irbuilder, tensor @@ -27,10 +28,10 @@ class Opset: Only a single instance of Opset is created for a given (domain, version) pair. """ - cache: dict[tuple[str, int], Opset] = {} + cache: dict[tuple[type, str, int], Opset] = {} def __new__(cls, domain: str, version: int): - key = (domain, version) + key = (cls, domain, version) existing = cls.cache.get(key) if existing: return existing @@ -41,13 +42,13 @@ def __new__(cls, domain: str, version: int): cls.cache[key] = instance return instance - def __repr__(self): - return f"{self.__class__.__name__}({self.domain!r}, {self.version!r})" - - def __init__(self, domain: str, version: int): + def __init__(self, domain: Optional[str] = None, version: Optional[int] = None): # Nothing to do. Object is initialized by __new__ pass + def __repr__(self): + return f"{self.__class__.__name__}({self.domain!r}, {self.version!r})" + def __getitem__(self, opname): try: return onnx.defs.get_schema(opname, self.version, self.domain) @@ -80,6 +81,17 @@ def add_function_def(self, fun): ) self.function_defs[fun.name] = fun + def _prepare_inputs(self, _: OpSchema, *inputs): + """Trims 'None' values from the end of the inputs list. 
This is used to support
+        omitting trailing optional inputs (valid only when no required inputs follow
+        them), preparing a valid call against the Op. Used by the static opset code generator.
+        """
+        # TODO: validate the op schema as 'None' values are removed?
+        input_list = list(inputs)
+        while input_list and input_list[-1] is None:
+            del input_list[-1]
+        return input_list
+


# ONNX ops
diff --git a/opgen/__init__.py b/opgen/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/opgen/__main__.py b/opgen/__main__.py
new file mode 100644
index 0000000000..7565d8cdec
--- /dev/null
+++ b/opgen/__main__.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+import subprocess
+from os import makedirs
+from pathlib import Path
+from shutil import rmtree
+
+from opgen.onnx_opset_builder import OpsetsBuilder
+
+MIN_REQUIRED_ONNX_OPSET_VERSION = 14
+
+self_dir = Path(__file__).parent
+repo_root = self_dir.parent
+
+module_base_names = ["onnxscript", "onnx_opset"]
+opsets_path = repo_root.joinpath(*module_base_names)
+
+try:
+    rmtree(opsets_path)
+except FileNotFoundError:
+    pass  # if opsets_path doesn't exist, that's great
+
+# need to generate a blank onnx_opset module since
+# onnxscript/__init__.py will import it (and we deleted it above);
+# it will be overridden with correct code as part of the generation
+# below.
+makedirs(opsets_path)
+with open(opsets_path.joinpath("__init__.py"), "w", encoding="utf-8"):
+    pass
+
+builder = OpsetsBuilder(".".join(module_base_names), MIN_REQUIRED_ONNX_OPSET_VERSION)
+paths = builder.write(repo_root)
+subprocess.check_call(["black", "--quiet", *paths])
+subprocess.check_call(["isort", "--quiet", *paths])
+
+print(f"Generated Ops: {builder.all_ops_count}")
+
+if len(builder.unsupported_ops) > 0:
+    print("Unsupported Ops:")
+    for key, errors in sorted(builder.unsupported_ops.items()):
+        print(f"  error: {key}:")
+        for error in errors:
+            print(f"    - {error.op}")
+            print(f"      {error.op.docuri}")
diff --git a/opgen/onnx_opset_builder.py b/opgen/onnx_opset_builder.py
new file mode 100644
index 0000000000..91ae06b748
--- /dev/null
+++ b/opgen/onnx_opset_builder.py
@@ -0,0 +1,574 @@
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+# -------------------------------------------------------------------------- + +from __future__ import annotations + +from os import makedirs +from pathlib import Path +from textwrap import dedent +from typing import Any, Iterable, Optional, TextIO + +from onnx.defs import AttributeProto, OpSchema, get_all_schemas_with_history +from onnx.helper import get_attribute_value + +import opgen.pygen as cg + +__all__ = [ + "QualOpName", + "UnsupportedOpError", + "OpsetsBuilder", + "parse_attr_type", + "parse_input_output_type", +] + +MODULE_ONNX = "onnx" +MODULE_ONNX_DEFS = "onnx.defs" +MODULE_ONNX_SCRIPT_TYPES = "onnxscript.onnx_types" +MODULE_ONNX_SCRIPT_VALUES = "onnxscript.values" + + +class QualOpName: + def __init__(self, domain: str, name: str, version: int): + self.domain = domain + self.name = name + self.version = version + self.docuri = ( + "https://onnx.ai/onnx/operators/onnx_" + f"{domain.replace('.', '')}_{name}.html#{name.lower()}-{version}" + ) + + def __repr__(self) -> str: + return ( + f"QualOpName(domain={self.domain!r}, " + f"version={self.version!r}, name={self.name!r})" + ) + + def __str__(self) -> str: + domain_prefix = f"{self.domain}::" if self.domain else "" + return f"{domain_prefix}{self.name}({self.version})" + + +class OpsetBaseTypeRef(cg.TypeRef): + def __init__(self): + super().__init__(MODULE_ONNX_SCRIPT_VALUES, "Opset") + + +class TensorTypeRef(cg.TypeRef): + def __init__(self): + super().__init__(MODULE_ONNX_SCRIPT_TYPES, "Tensor") + + +class UnsupportedOpError(NotImplementedError): + def __init__(self, op: QualOpName, message: str): + super().__init__(self, message) + self.op = op + self.message = message + + +def _make_suffix(str: str) -> str: + return f"_{str.replace('.', '_')}" if str else "" + + +def _make_class_name(domain: str, version: int) -> str: + return f"Opset{_make_suffix(domain)}{version}" + + +def _make_module_name(base_name: str, domain: str, version: int) -> str: + return f"{base_name}._impl.opset{_make_suffix(domain)}{version}" + + +class OpsetModule(cg.Module): + def __init__(self, base_name: str, domain: str, version: int, *members: cg.Stmt): + self.domain = domain + self.version = version + super().__init__(*members, name=_make_module_name(base_name, domain, version)) + + +class OpsetsBuilder: + def __init__(self, module_base_name: str, min_default_opset_version: int): + self.module_base_name = module_base_name + self.min_default_opset_version = min_default_opset_version + self.all_ops_count: int = 0 + self.all_modules: list[cg.Module] = [] + self.unsupported_ops: dict[str, list[UnsupportedOpError]] = {} + self._make_opset_modules() + self._make_init_module() + self._make_imports() + + def _make_opset_modules(self): + domains = {} + schemas: list[OpSchema] = sorted( + get_all_schemas_with_history(), + key=lambda op: (op.domain, op.since_version, op.name), + ) + + for schema in schemas: + qualname = QualOpName(schema.domain, schema.name, schema.since_version) + domain: str = schema.domain + version: int = schema.since_version + domain_opsets = domains.setdefault(domain, {}) + + if version in domain_opsets: + opset = domain_opsets[version] + else: + if version > 1: + base_type = cg.TypeRef( + _make_module_name(self.module_base_name, domain, version - 1), + _make_class_name(domain, version - 1), + ) + else: + base_type = OpsetBaseTypeRef() + + opset = OpsetModule( + self.module_base_name, + domain, + version, + cg.ClassDef( + _make_class_name(domain, version), + cg.FunctionDef( + "__new__", + cg.Arg("cls"), + body=cg.ThunkStmt( + f"return 
Opset.__new__(cls, " f"{domain!r}, {version!r})" + ), + ), + cg.FunctionDef( + "__init__", cg.Arg("self"), body=cg.ThunkStmt("super().__init__()") + ), + bases=[base_type], + ), + ) + + self.all_modules.append(opset) + domain_opsets[version] = opset + + try: + function = self._make_function(qualname, schema) + opset_class = cg.first_or_none(opset.get_children_of_type(cg.ClassDef)) + if opset_class: + opset_class.append_body(function) + self.all_ops_count += 1 + except NotImplementedError as error: + if not isinstance(error, UnsupportedOpError): + error = UnsupportedOpError(qualname, str(error)) + unsupported_set = self.unsupported_ops.setdefault(error.message, []) + unsupported_set.append(error) + + for module in self.all_modules: + module.accept(cg.DocCommentBuilder()) + + self.all_modules.sort(key=lambda m: (m.domain, m.version, m.name)) + + def _make_init_module(self): + all_list = cg.ListExpr(cg.Constant("default_opset"), cg.Constant("all_opsets")) + init_module = cg.Module( + cg.ImportFrom(MODULE_ONNX_DEFS, cg.Alias("onnx_opset_version")), + cg.Assign(cg.Name("__all__"), all_list), + cg.If( + cg.BinOp( + cg.Call(cg.Name("onnx_opset_version")), + "<", + cg.Constant(self.min_default_opset_version), + ), + cg.Raise( + cg.Call( + cg.Name("ImportError"), + cg.ThunkExpr( + 'f"ONNX Script requires ONNX opset >= ' + f"{self.min_default_opset_version} " + 'but {onnx_opset_version()} is detected."' + ), + ) + ), + ), + name=f"{self.module_base_name}.__init__", + ) + + all_opsets = cg.DictExpr() + for opset_module in filter(lambda m: isinstance(m, OpsetModule), self.all_modules): + opset_module: OpsetModule + opset_class = cg.first_or_none(opset_module.get_children_of_type(cg.ClassDef)) + if opset_class is not None: + opset_export_name = opset_module.name.split(".")[-1] + all_opsets.append_element( + cg.DictElem( + cg.TupleExpr( + cg.Constant(opset_module.domain), cg.Constant(opset_module.version) + ), + cg.Name(opset_export_name), + ) + ) + all_list.append_child( + cg.Constant(opset_export_name), cg.ListExpr.Roles.Elements + ) + init_module.append_body( + cg.Assign(cg.Name(opset_export_name), cg.Call(opset_class.make_typeref())) + ) + init_module.append_body(cg.Assign(cg.Name("all_opsets"), all_opsets)) + + default_opset = cg.Assign( + cg.Name("default_opset"), + cg.Subscript( + cg.Name("all_opsets"), + cg.TupleExpr(cg.Constant(""), cg.Call(cg.Name("onnx_opset_version"))), + ), + cg.TypeRef(None, f"Opset{self.min_default_opset_version}"), + ) + default_opset.trailing_trivia = " # type: ignore" + init_module.append_body(default_opset) + + self.all_modules.append(init_module) + + def _make_imports(self): + for module in self.all_modules: + if isinstance(module, OpsetModule): + module.prepend_child( + cg.ImportFrom(MODULE_ONNX_DEFS, cg.Alias("get_schema")), + cg.Module.Roles.Body, + ) + module.prepend_child( + cg.ImportFrom(MODULE_ONNX_SCRIPT_VALUES, cg.Alias("Op, Opset")), + cg.Module.Roles.Body, + ) + module.accept(cg.ImportAdjuster()) + + def _make_function(self, qualname: QualOpName, schema: OpSchema) -> cg.FunctionDef: + op_inputs: list[cg.Expr] = [] + op_attrs: list[cg.Expr] = [] + args = list(self._make_function_args(schema)) + + for arg in args: + if arg.name == "self": + continue + if arg.is_vararg: + op_inputs.append(cg.Starred(cg.Name(arg.name))) + elif arg.is_kwarg: + op_attrs.append(cg.Assign(cg.Name(arg.name), cg.Name(arg.name))) + else: + op_inputs.append(cg.Name(arg.name)) + + if len(op_inputs) > 0: + op_call = cg.Call( + cg.Name("op"), + cg.Starred( + 
cg.Call(cg.Name("self._prepare_inputs"), cg.Name("schema"), *op_inputs) + ), + *op_attrs, + ) + else: + op_call = cg.Call(cg.Name("op"), *op_attrs) + + doc = f'[🌐 {qualname}]({qualname.docuri} "Online Documentation")\n\n{schema.doc}' + + def return_type(): + return cg.TypeRef.make_composite_if_multiple( + cg.TypingRefs.Tuple, + *[self._make_union_typeref(output.types) for output in schema.outputs], + ) + + func = cg.FunctionDef( + qualname.name, + *args, + return_type=return_type(), + doc=_process_documentation(doc), + body=[ + cg.Assign( + cg.Name("schema"), + cg.Call( + cg.Name("get_schema"), + cg.Constant(qualname.name), + cg.Constant(qualname.version), + cg.Constant(qualname.domain), + ), + ), + cg.Assign( + cg.Name("op"), + cg.Call( + cg.Name("Op"), + cg.Name("self"), + cg.Constant(qualname.name), + cg.Name("schema"), + ), + cg.TypingRefs.Callable(cg.EllipsisTypeRef(), return_type()), + ), + cg.Return(op_call), + ], + ) + + return func + + def _make_function_args(self, schema: OpSchema) -> Iterable[cg.Arg]: + yield cg.Arg("self") + yield from self._make_function_input_args(schema) + yield from self._make_function_attr_args(schema) + + def _make_input_arg_name(self, input_name: str, schema: OpSchema): + """ONNX allows for an op to have an input and an attribute with the same name. + Attribute names have contextual meaning however, so detect this case and disambiguate + the input name. See Split(1) for the only offending OpSchema as of opset 18. + """ + for attr in schema.attributes.values(): + if attr.name == input_name: + return f"{input_name}_" + return input_name + + def _make_function_input_args(self, schema: OpSchema) -> Iterable[cg.Arg]: + args: list[cg.Arg] = [] + for input in schema.inputs: + optional = input.option == OpSchema.FormalParameterOption.Optional + variadic = input.option == OpSchema.FormalParameterOption.Variadic + heterogeneous = not input.isHomogeneous + differentiable = ( + input.differentiationCategory + == OpSchema.DifferentiationCategory.Differentiable + ) + non_differentiable = ( + input.differentiationCategory + == OpSchema.DifferentiationCategory.NonDifferentiable + ) + + doctags = [] + if optional: + doctags.append("optional") + elif variadic: + # if we encounter a variadic input, previous + # inputs cannot have default values + for prev_arg in args: + prev_arg.default_value = None + doctags.append("variadic") + if heterogeneous: + doctags.append("heterogeneous") + if differentiable: + doctags.append("differentiable") + elif non_differentiable: + doctags.append("non-differentiable") + + doc = input.description.strip() + if len(doctags) > 0: + doc = f"({', '.join(doctags)}) {doc}" + + type = self._make_union_typeref(input.types) + if optional and not isinstance(type, cg.TypingRefs.Optional): + type = cg.TypingRefs.Optional(type) + + args.append( + cg.Arg( + self._make_input_arg_name(input.name, schema), + type=type, + doc=_process_documentation(doc), + is_vararg=variadic, + default_value=cg.Constant(None) if optional else None, + ) + ) + + return args + + def _make_function_attr_args(self, schema: OpSchema) -> Iterable[cg.Arg]: + attr_args = [] + for attr in schema.attributes.values(): + attr_type = parse_attr_type(attr.type) + default_value = None + + if attr.required: + pass + elif attr.default_value.name: + default_value = get_attribute_value(attr.default_value) + + def fmt(value: Any) -> str: + if isinstance(value, (bytes, bytearray)): + return str(value.decode("utf-8")) + return value + + if isinstance(default_value, list): + default_value = 
tuple(fmt(val) for val in default_value) + else: + default_value = fmt(default_value) + else: + default_value = None + + if default_value is None: + attr_type = cg.TypingRefs.Optional(attr_type) + + attr_args.append( + cg.Arg( + attr.name, + type=attr_type, + default_value=cg.Constant(default_value), + doc=attr.description, + is_kwarg=True, + ) + ) + + for arg in sorted(attr_args, key=lambda p: p.has_default_value): + yield arg + + def _make_union_typeref(self, onnx_types: list[str]) -> cg.TypingRefs.Union: + return cg.TypeRef.make_composite_if_multiple( + cg.TypingRefs.Union, + *[parse_input_output_type(type) for type in sorted(onnx_types)], + ) + + def write(self, base_path: Path) -> list[Path]: + return sorted([self._write_module(base_path, module) for module in self.all_modules]) + + def _write_module(self, base_path: Path, module: cg.Module) -> Path: + qual_name = module.name.split(".") + base_path = base_path.joinpath(*qual_name[:-1]) + makedirs(base_path, exist_ok=True) + path = base_path.joinpath(qual_name[-1] + ".py") + with open(path, "w", encoding="utf-8") as writer: + self._write_header(writer) + module.accept(cg.PythonWriter(writer)) + return path + + def _write_header(self, writer: TextIO): + dashline = f"# {'-' * 74}\n" + writer.write(dashline) + writer.write("# ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️ \n") + writer.write("# ⚙️ Generated by 'python -m opgen'\n") + writer.write(dashline) + writer.write("# Copyright (c) Microsoft Corporation. ") + writer.write("All rights reserved.\n") + writer.write("# Licensed under the MIT License.\n") + writer.write(dashline) + writer.write("# flake8: noqa\n") + writer.write("# mypy: disable-error-code=override\n") + writer.write("# pylint: disable=W0221,W0222,W0237,W0246,R0901\n") + writer.write(dashline) + writer.write("\n") + + +def parse_input_output_type(onnx_type: str) -> cg.TypeRef: + def error(message: Optional[str] = None): + return NotImplementedError( + f"input/output type not implemented: {onnx_type!r}" + + (f" ({message!r})" if message else "") + ) + + default_value_map = { + "BOOL": bool(), + "FLOAT": float(), + "FLOAT16": float(), + "BFLOAT16": float(), + "DOUBLE": float(), + "INT8": int(), + "INT16": int(), + "INT32": int(), + "INT64": int(), + "UINT8": int(), + "UINT16": int(), + "UINT32": int(), + "UINT64": int(), + "COMPLEX64": complex(), + "COMPLEX128": complex(), + } + + id = "" + stack: list[cg.TypeRef] = [] + for c in onnx_type: + if c == "(": + if id == "tensor": + type = TensorTypeRef() + elif id == "seq": + type = cg.TypingRefs.Sequence() + elif id == "map": + type = cg.TypingRefs.Mapping() + elif id == "optional": + type = cg.TypingRefs.Optional() + else: + raise error(id) + if len(stack) > 0: + stack[-1].append_typearg(type) + stack.append(type) + id = "" + elif c in (")", ","): + type = stack.pop() if c == ")" else stack[-1] + if isinstance(type, TensorTypeRef): + type.name = id.upper() + type.default_value = cg.Constant(default_value_map.get(type.name)) + elif id and isinstance(type, cg.TypingRefs.Mapping): + if id == "int64": + type.append_typearg(cg.IntTypeRef()) + elif id == "string": + type.append_typearg(cg.StrTypeRef()) + else: + raise error(id) + elif id: + break + id = "" + if len(stack) == 0: + return type + else: + id += c + raise error() + + +def parse_attr_type(type) -> cg.TypeRef: + if type == AttributeProto.FLOAT: + return cg.FloatTypeRef() + if type == AttributeProto.INT: + return cg.IntTypeRef() + if type == AttributeProto.STRING: + return cg.StrTypeRef() + if type == 
AttributeProto.TENSOR: + return cg.TypeRef(MODULE_ONNX, "TensorProto") + if type == AttributeProto.SPARSE_TENSOR: + return cg.TypeRef(MODULE_ONNX, "SparseTensorProto") + if type == AttributeProto.GRAPH: + return cg.TypeRef(MODULE_ONNX, "GraphProto") + if type == AttributeProto.TYPE_PROTO: + return cg.TypeRef(MODULE_ONNX, "TypeProto") + if type == AttributeProto.FLOATS: + return cg.TypingRefs.Sequence(cg.FloatTypeRef()) + if type == AttributeProto.INTS: + return cg.TypingRefs.Sequence(cg.IntTypeRef()) + if type == AttributeProto.STRINGS: + return cg.TypingRefs.Sequence(cg.StrTypeRef()) + if type == AttributeProto.TENSORS: + return cg.TypingRefs.Sequence(cg.TypeRef(MODULE_ONNX, "TensorProto")) + if type == AttributeProto.SPARSE_TENSORS: + return cg.TypingRefs.Sequence(cg.TypeRef(MODULE_ONNX, "SparseTensorProto")) + if type == AttributeProto.GRAPHS: + return cg.TypingRefs.Sequence(cg.TypeRef(MODULE_ONNX, "GraphProto")) + if type == AttributeProto.TYPE_PROTOS: + return cg.TypingRefs.Sequence(cg.TypeRef(MODULE_ONNX, "TypeProto")) + raise NotImplementedError(f"attribute type not implemented: {type}") + + +def _process_documentation(doc: str): + # Lifted from ONNX's docsgen: + # https://github.com/onnx/onnx/blob/3fd41d249bb8006935aa0031a332dd945e61b7e5/docs/docsgen/source/onnx_sphinx.py#L414 + doc = dedent(doc or "") + main_docs_url = "https://github.com/onnx/onnx/blob/master/" + rep = { + "[the doc](IR.md)": "`ONNX <{0}docs/IR.md>`_", + "[the doc](Broadcasting.md)": "`Broadcasting in ONNX <{0}docs/Broadcasting.md>`_", + "
": "", + "
": "", + "
": "* ", + "
": " ", + "": "", + "
": "", + "": "``", + "": "``", + "
": "\n", + } + for k, v in rep.items(): + doc = doc.replace(k, v.format(main_docs_url)) + move = 0 + lines = [] + for line in doc.split("\n"): + if line.startswith("```"): + if move > 0: + move -= 4 + lines.append("\n") + else: + lines.append("::\n") + move += 4 + elif move > 0: + lines.append(" " * move + line) + else: + lines.append(line) + return "\n".join(lines) diff --git a/opgen/pygen.py b/opgen/pygen.py new file mode 100644 index 0000000000..8a3d3c2c0b --- /dev/null +++ b/opgen/pygen.py @@ -0,0 +1,1483 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- + +from __future__ import annotations + +from abc import ABC, abstractmethod +from enum import Enum +from textwrap import TextWrapper, dedent +from typing import ( + Any, + Callable, + Generic, + Iterable, + Optional, + Set, + TextIO, + Tuple, + Type, + TypeVar, + Union, +) + +T = TypeVar("T") +TNode = TypeVar("TNode", bound="Node") +TExpr = TypeVar("TExpr", bound="Expr") +NoneType = type(None) + + +def _assert_instance(instance, expected_type: Union[Type, Tuple[Type, ...]]): + if not isinstance(instance, expected_type): + raise TypeError(f"expected: {expected_type!r}; actual: {instance!r}") + + +__end_of_sequence = StopIteration() + + +def first_or_none(seq: Iterable[T]) -> Optional[T]: + return next(iter(seq), None) + + +def first(seq: Iterable[T]) -> T: + return next(iter(seq)) + + +def single_or_none(seq: Iterable[T]) -> Optional[T]: + i = iter(seq) + value = next(i, __end_of_sequence) + if value is __end_of_sequence: + return None + if next(i, __end_of_sequence) is not __end_of_sequence: + raise StopIteration("sequence contains more than one element") + return value + + +class Role: + def __init__(self, name: str): + _assert_instance(name, str) + self.name = name + + def __str__(self): + return self.name + + +class NodePredicate: + always: "NodePredicate" + + def __init__( + self, + role: Optional[Role] = None, + type_: Optional[Type[TNode]] = None, + func: Optional[Callable[["Node"], bool]] = None, + ): + _assert_instance(role, (Role, NoneType)) + _assert_instance(type_, (type, NoneType)) + self.role = role + self.type = type_ + self.func = func + + def matches(self, node: "Node"): + _assert_instance(node, Node) + matches = True + if self.role: + matches &= node.role is self.role + if self.type: + matches &= isinstance(node, self.type) + if self.func and matches: + matches &= self.func(node) + return matches + + +NodePredicate.always = NodePredicate() + + +class Node(ABC): + # pylint: disable=W0212 + + def __init__(self): + self._role: Optional[Role] = None + self._parent: Optional[Node] = None + self._prev_sibling: Optional[Node] = None + self._next_sibling: Optional[Node] = None + self._first_child: Optional[Node] = None + self._last_child: Optional[Node] = None + self.leading_trivia: Optional[str] = None + self.trailing_trivia: Optional[str] = None + + @property + def qual_name(self) -> str: + names = [] + for ancestor in self.get_ancestors(and_self=True): + names.insert(0, ancestor.name if hasattr(ancestor, "name") else "") + return ".".join(names) + + @property + def parent_module(self) -> Optional["Module"]: + return first_or_none(self.get_ancestors_of_type(Module)) + + @property + def parent(self): + return self._parent + + @property + def role(self): + return self._role + + @property + def prev_sibling(self): + return 
self._prev_sibling
+
+    @property
+    def next_sibling(self):
+        return self._next_sibling
+
+    @property
+    def first_child(self):
+        return self._first_child
+
+    @property
+    def last_child(self):
+        return self._last_child
+
+    @property
+    def has_children(self):
+        return self._first_child is not None
+
+    @property
+    def children(self) -> Iterable["Node"]:
+        current_node = self.first_child
+        while current_node is not None:
+            # save next then yield to allow removing/replacing nodes while iterating
+            next_node = current_node.next_sibling
+            yield current_node
+            current_node = next_node
+
+    def get_children(self, predicate: NodePredicate) -> Iterable["Node"]:
+        _assert_instance(predicate, NodePredicate)
+        yield from filter(predicate.matches, self.children)
+
+    def get_children_in_role(self, role: Role):
+        _assert_instance(role, Role)
+        return self.get_children(NodePredicate(role=role))
+
+    def get_children_of_type(self, type_: Type[TNode]) -> Iterable[TNode]:
+        _assert_instance(type_, type)
+        return self.get_children(NodePredicate(type_=type_))
+
+    def get_ancestors(
+        self, predicate: Optional[NodePredicate] = None, and_self=False
+    ) -> Iterable["Node"]:
+        current_node = self if and_self else self.parent
+        while current_node:
+            # save next then yield to allow removing/replacing nodes while iterating
+            next_node = current_node.parent
+            if predicate is None or predicate.matches(current_node):
+                yield current_node
+            current_node = next_node
+
+    def get_ancestors_in_role(self, role: Role, and_self=False):
+        _assert_instance(role, Role)
+        return self.get_ancestors(NodePredicate(role=role), and_self=and_self)
+
+    def get_ancestors_of_type(self, type_: Type[TNode], and_self=False) -> Iterable[TNode]:
+        _assert_instance(type_, type)
+        return self.get_ancestors(NodePredicate(type_=type_), and_self=and_self)
+
+    def _set_parent(self, child: "Node"):
+        if child._parent is not None:
+            raise ValueError(f"node already has a parent: {child.parent!r}")
+        child._parent = self
+
+    def _get_single_child(self, role: Role) -> Optional["Node"]:
+        return first_or_none(self.get_children_in_role(role))
+
+    def _set_single_child(self, node: "Node", role: Role):
+        current_node = self._get_single_child(role)
+        if current_node:
+            current_node.replace(node)
+        else:
+            self.append_child(node, role)
+
+    def append_children(self, children: Optional[Union["Node", Iterable["Node"]]], role: Role):
+        _assert_instance(role, Role)
+        if children is None:
+            return
+
+        if isinstance(children, Node):
+            self.append_child(children, role)
+        else:
+            for child in children:
+                self.append_child(child, role)
+
+    def append_child(self, child: "Node", role: Role):
+        _assert_instance(role, Role)
+        if child is None:
+            return
+        _assert_instance(child, Node)
+
+        self._set_parent(child)
+        child._role = role
+
+        if self._first_child is None:
+            self._last_child = child
+            self._first_child = child
+        else:
+            self._last_child._next_sibling = child
+            child._prev_sibling = self._last_child
+            self._last_child = child
+
+    def insert_child_before(self, next_sibling: Optional["Node"], child: "Node", role: Role):
+        _assert_instance(next_sibling, (Node, type(None)))
+        _assert_instance(child, Node)
+        _assert_instance(role, Role)
+
+        if next_sibling is None:
+            self.append_child(child, role)
+            return
+
+        self._set_parent(child)
+        child._role = role
+        child._next_sibling = next_sibling
+        child._prev_sibling = next_sibling._prev_sibling
+
+        if next_sibling._prev_sibling is None:
+            self._first_child = child
+        else:
+            next_sibling._prev_sibling._next_sibling = child
+
+        next_sibling._prev_sibling = child
+
+    def prepend_child(self, child: "Node", role: Role):
+        _assert_instance(child, Node)
+        _assert_instance(role, Role)
+        self.insert_child_before(self.first_child, child, role)
+
+    def remove(self):
+        if self._prev_sibling is not None:
+            self._prev_sibling._next_sibling = self._next_sibling
+        else:
+            self._parent._first_child = self._next_sibling
+
+        if self._next_sibling is not None:
+            self._next_sibling._prev_sibling = self._prev_sibling
+        else:
+            self._parent._last_child = self._prev_sibling
+
+        self._parent = None
+        self._role = None
+        self._prev_sibling = None
+        self._next_sibling = None
+
+    def replace(self, new_node: Optional["Node"]):
+        if new_node is None:
+            self.remove()
+            return
+
+        if new_node is self:
+            return
+
+        if self.parent is None:
+            raise ValueError("cannot replace root node")
+
+        _assert_instance(new_node, Node)
+
+        if new_node.parent is not None:
+            if self in new_node.get_ancestors():
+                new_node.remove()
+            else:
+                raise ValueError(f"node is used in another tree: {new_node!r}")
+
+        new_node._parent = self._parent
+        new_node._role = self._role
+        new_node._prev_sibling = self._prev_sibling
+        new_node._next_sibling = self._next_sibling
+
+        if self._prev_sibling is None:
+            self._parent._first_child = new_node
+        else:
+            self._prev_sibling._next_sibling = new_node
+
+        if self._next_sibling is None:
+            self._parent._last_child = new_node
+        else:
+            self._next_sibling._prev_sibling = new_node
+
+        self._parent = None
+        self._role = None
+        self._prev_sibling = None
+        self._next_sibling = None
+
+    @abstractmethod
+    def accept(self, visitor: "Visitor"):
+        pass
+
+    def _dispatch_visit(self, dispatch: Callable[[TNode], Optional[bool]]):
+        visitor = dispatch.__self__
+        visitor.enter(self)
+        if dispatch(self) is True:
+            for child in self.children:
+                child.accept(dispatch.__self__)
+            visitor.leave(self)
+            dispatch(self)
+        else:
+            visitor.leave(self)
+        visitor.finish(self)
+
+
+class Expr(Node, ABC):
+    def accept(self, visitor: "Visitor"):
+        self._dispatch_visit(visitor.visit_expr)
+
+
+class ThunkExpr(Expr):
+    def __init__(self, code: str):
+        super().__init__()
+        self.code = code
+
+    def accept(self, visitor: "Visitor"):
+        self._dispatch_visit(visitor.visit_thunk_expr)
+
+
+class Name(Expr):
+    def __init__(self, identifier: str):
+        super().__init__()
+        self.identifier = identifier
+
+    def accept(self, visitor: "Visitor"):
+        self._dispatch_visit(visitor.visit_name)
+
+
+class Constant(Expr):
+    def __init__(self, value: Any):
+        super().__init__()
+        self.value = value
+
+    def accept(self, visitor: "Visitor"):
+        self._dispatch_visit(visitor.visit_constant)
+
+
+class ExprList(Expr, Generic[TExpr], ABC):
+    class Roles:
+        Elements = Role("ExprList.Elements")
+
+    def __init__(self, *elements: TExpr):
+        super().__init__()
+        self.append_children(elements, ExprList.Roles.Elements)
+
+    @property
+    def elements(self) -> Iterable[TExpr]:
+        return self.get_children_in_role(ExprList.Roles.Elements)
+
+    def append_element(self, element: TExpr):
+        _assert_instance(element, Expr)
+        self.append_child(element, ExprList.Roles.Elements)
+
+
+class BinOp(Expr):
+    class Roles:
+        Left = Role("BinOp.Left")
+        Right = Role("BinOp.Right")
+
+    def __init__(self, left: Expr, op: str, right: Expr):
+        super().__init__()
+        self.append_child(left, BinOp.Roles.Left)
+        self.op = op
+        self.append_child(right, BinOp.Roles.Right)
+
+    @property
+    def left(self):
+        return first(self.get_children_in_role(BinOp.Roles.Left))
+
+    @property
+    def right(self):
+        return 
first(self.get_children_in_role(BinOp.Roles.Right)) + + def accept(self, visitor: "Visitor"): + self._dispatch_visit(visitor.visit_binop) + + +class Subscript(Expr): + class Roles: + Value = Role("Subscript.Value") + Slice = Role("Subscript.Slice") + + def __init__(self, value: Expr, slice: Expr): + super().__init__() + self.append_child(value, Subscript.Roles.Value) + self.append_child(slice, Subscript.Roles.Slice) + + @property + def value(self): + return first(self.get_children_in_role(Subscript.Roles.Value)) + + @property + def slice(self): + return first(self.get_children_in_role(Subscript.Roles.Slice)) + + def accept(self, visitor: "Visitor"): + self._dispatch_visit(visitor.visit_subscript) + + +class Starred(Expr): + class Roles: + Expr = Role("Starred.Expr") + + def __init__(self, expr: Expr): + super().__init__() + self.append_child(expr, Starred.Roles.Expr) + + @property + def expr(self): + return first(self.get_children_in_role(Starred.Roles.Expr)) + + def accept(self, visitor: "Visitor"): + self._dispatch_visit(visitor.visit_starred) + + +class Call(Expr): + class Roles: + Func = Role("Call.Func") + Args = Role("Call.Args") + + def __init__(self, func: Expr, *args: Expr): + super().__init__() + _assert_instance(func, Expr) + self.append_child(func, Call.Roles.Func) + self.append_children(args, Call.Roles.Args) + + @property + def func(self) -> Expr: + return first(self.get_children_in_role(Call.Roles.Func)) + + @property + def args(self) -> Iterable["Expr"]: + return self.get_children_in_role(Call.Roles.Args) + + def accept(self, visitor: "Visitor"): + self._dispatch_visit(visitor.visit_call) + + +class Lambda(Expr): + class Roles: + Args = Role("Lambda.Args") + Body = Role("Lambda.Body") + + def __init__(self, body: Expr, *args: Arg): + super().__init__() + _assert_instance(body, Expr) + self.append_child(body, Lambda.Roles.Body) + self.append_children(args, Lambda.Roles.Args) + + @property + def body(self) -> Expr: + return first(self.get_children_in_role(Lambda.Roles.Body)) + + @property + def args(self) -> Iterable["Expr"]: + return self.get_children_in_role(Lambda.Roles.Args) + + def accept(self, visitor: "Visitor"): + self._dispatch_visit(visitor.visit_lambda) + + +class TupleExpr(ExprList): + def accept(self, visitor: "Visitor"): + self._dispatch_visit(visitor.visit_tuple_expr) + + +class ListExpr(ExprList): + def accept(self, visitor: "Visitor"): + self._dispatch_visit(visitor.visit_list_expr) + + +class SetExpr(ExprList): + def accept(self, visitor: "Visitor"): + self._dispatch_visit(visitor.visit_set_expr) + + +class DictElem(Expr): + class Roles: + Key = Role("DictElem.Key") + Value = Role("DictElem.Value") + + def __init__(self, key: Expr, value: Expr): + super().__init__() + _assert_instance(key, Expr) + _assert_instance(value, Expr) + self.append_child(key, DictElem.Roles.Key) + self.append_child(value, DictElem.Roles.Value) + + @property + def key(self) -> Expr: + return first(self.get_children_in_role(DictElem.Roles.Key)) + + @property + def value(self) -> Expr: + return first(self.get_children_in_role(DictElem.Roles.Value)) + + def accept(self, visitor: "Visitor"): + self._dispatch_visit(visitor.visit_dict_elem) + + +class DictExpr(ExprList[DictElem]): + def accept(self, visitor: "Visitor"): + self._dispatch_visit(visitor.visit_dict_expr) + + +class TypeRef(Expr): + class Roles: + TypeArgs = Role("TypeRef.TypeArgs") + + def __init__( + self, + module: Optional[str], + name: str, + *typeargs: "TypeRef", + default_value: Optional[Constant] = None, + ): + 
super().__init__()
+        self.module = module
+        self.name = name
+        self.default_value = default_value or Constant(None)
+        self.imported_by: Optional["ImportBase"] = None
+        self.append_children(typeargs, TypeRef.Roles.TypeArgs)
+
+    @property
+    def typeargs(self) -> Iterable["TypeRef"]:
+        return self.get_children_in_role(TypeRef.Roles.TypeArgs)
+
+    def append_typearg(self, typearg: "TypeRef"):
+        _assert_instance(typearg, TypeRef)
+        self.append_child(typearg, TypeRef.Roles.TypeArgs)
+
+    def accept(self, visitor: "Visitor"):
+        self._dispatch_visit(visitor.visit_typeref)
+
+    @staticmethod
+    def make_composite_if_multiple(
+        composite_type: Type["TypeRef"], *typeargs: "TypeRef"
+    ) -> "TypeRef":
+        if len(typeargs) == 0:
+            return NoneTypeRef()
+        elif len(typeargs) == 1:
+            return typeargs[0]
+        else:
+            return composite_type(*typeargs)
+
+
+class BuiltinTypeRef(TypeRef):
+    def __init__(self, name: str, *typeargs: "TypeRef", **kwargs):
+        super().__init__(None, name, *typeargs, **kwargs)
+
+
+class NoneTypeRef(BuiltinTypeRef):
+    def __init__(self):
+        super().__init__("None")
+
+
+class BoolTypeRef(BuiltinTypeRef):
+    def __init__(self):
+        super().__init__("bool", default_value=Constant(bool()))
+
+
+class IntTypeRef(BuiltinTypeRef):
+    def __init__(self):
+        super().__init__("int", default_value=Constant(int()))
+
+
+class FloatTypeRef(BuiltinTypeRef):
+    def __init__(self):
+        super().__init__("float", default_value=Constant(float()))
+
+
+class ComplexTypeRef(BuiltinTypeRef):
+    def __init__(self):
+        super().__init__("complex", default_value=Constant(complex()))
+
+
+class StrTypeRef(BuiltinTypeRef):
+    def __init__(self):
+        super().__init__("str")
+
+
+class BytesTypeRef(BuiltinTypeRef):
+    def __init__(self):
+        super().__init__("bytes")
+
+
+class EllipsisTypeRef(BuiltinTypeRef):
+    def __init__(self):
+        super().__init__("...")
+
+
+class TypingRefs(ABC):
+    @abstractmethod
+    def __init__(self):
+        pass
+
+    class Any(TypeRef):
+        def __init__(self):
+            super().__init__("typing", "Any")
+
+    class Union(TypeRef):
+        def __init__(self, *typeargs: TypeRef):
+            super().__init__("typing", "Union", *typeargs)
+
+    class Optional(TypeRef):
+        def __init__(self, *typeargs: TypeRef):
+            super().__init__("typing", "Optional", *typeargs)
+
+    class Sequence(TypeRef):
+        def __init__(self, *typeargs: TypeRef):
+            super().__init__("typing", "Sequence", *typeargs)
+
+    class Tuple(TypeRef):
+        def __init__(self, *typeargs: TypeRef):
+            super().__init__("typing", "Tuple", *typeargs)
+
+    class Mapping(TypeRef):
+        def __init__(self, *typeargs: TypeRef):
+            super().__init__("typing", "Mapping", *typeargs)
+
+    class List(TypeRef):
+        def __init__(self, *typeargs: TypeRef):
+            super().__init__("typing", "List", *typeargs)
+
+    class Annotation(TypeRef):
+        def __init__(self, *typeargs: TypeRef):
+            super().__init__("typing", "Annotation", *typeargs)
+
+    class Callable(TypeRef):
+        def __init__(self, *typeargs: TypeRef):
+            super().__init__("typing", "Callable", *typeargs)
+
+
+class Arg(Node):
+    class Roles:
+        Type = Role("Arg.Type")
+        DefaultValue = Role("Arg.DefaultValue")
+
+    def __init__(
+        self,
+        name: str,
+        type: Optional[TypeRef] = None,
+        default_value: Optional[Expr] = None,
+        is_vararg: bool = False,
+        is_kwarg: bool = False,
+        doc: Optional[str] = None,
+    ):
+        super().__init__()
+        self.name = name
+        self.is_vararg = is_vararg
+        self.is_kwarg = is_kwarg
+        self.doc = doc
+        self.append_child(type, Arg.Roles.Type)
+        self.append_child(default_value, Arg.Roles.DefaultValue)
+
+    @property
+    def type(self) -> Optional[TypeRef]:
+        return first_or_none(self.get_children_in_role(Arg.Roles.Type))
+
+    @property
+    def default_value(self) -> Optional[Expr]:
+        return first_or_none(self.get_children_in_role(Arg.Roles.DefaultValue))
+
+    @default_value.setter
+    def default_value(self, value: Optional[Expr]):
+        self._set_single_child(value, Arg.Roles.DefaultValue)
+
+    @property
+    def has_default_value(self) -> bool:
+        return self.default_value is not None
+
+    def accept(self, visitor: "Visitor"):
+        self._dispatch_visit(visitor.visit_arg)
+
+
+class Stmt(Node, ABC):
+    pass
+
+
+class BlockStmt(Stmt, ABC):
+    pass
+
+
+class Pass(Stmt):
+    def accept(self, visitor: "Visitor"):
+        self._dispatch_visit(visitor.visit_pass)
+
+
+class ThunkStmt(Stmt):
+    class Roles:
+        Thunk = Role("ThunkStmt.Thunk")
+
+    def __init__(self, *thunks: Union[str, Stmt]):
+        super().__init__()
+        self.thunk: Optional[str] = None
+        if len(thunks) == 1 and isinstance(thunks[0], str):
+            self.thunk = thunks[0]
+        else:
+            for thunk in thunks:
+                if isinstance(thunk, str):
+                    self.append_child(ThunkStmt(thunk), ThunkStmt.Roles.Thunk)
+                else:
+                    self.append_child(thunk, ThunkStmt.Roles.Thunk)
+
+    def accept(self, visitor: "Visitor"):
+        self._dispatch_visit(visitor.visit_thunk_stmt)
+
+
+class FunctionDef(BlockStmt):
+    class Roles:
+        Args = Role("FunctionDef.Args")
+        ReturnType = Role("FunctionDef.ReturnType")
+        Body = Role("FunctionDef.Body")
+
+    def __init__(
+        self,
+        name: str,
+        *args: Arg,
+        return_type: Optional[TypeRef] = None,
+        body: Union[Stmt, Iterable[Stmt]] = (),
+        doc: Optional[str] = None,
+    ):
+        super().__init__()
+        self.name = name
+        self.doc = doc
+        self.append_children(args, FunctionDef.Roles.Args)
+        self.append_children(return_type, FunctionDef.Roles.ReturnType)
+        self.append_children(body, FunctionDef.Roles.Body)
+
+    @property
+    def args(self) -> Iterable[Arg]:
+        return self.get_children_in_role(FunctionDef.Roles.Args)
+
+    def append_arg(self, arg: Arg):
+        _assert_instance(arg, Arg)
+        self.append_child(arg, FunctionDef.Roles.Args)
+
+    @property
+    def return_type(self) -> Optional[TypeRef]:
+        return self._get_single_child(FunctionDef.Roles.ReturnType)
+
+    @return_type.setter
+    def return_type(self, return_type: Optional[TypeRef]):
+        self._set_single_child(return_type, FunctionDef.Roles.ReturnType)
+
+    @property
+    def body(self) -> Iterable[Stmt]:
+        return self.get_children_in_role(FunctionDef.Roles.Body)
+
+    def append_body(self, stmt: Stmt):
+        _assert_instance(stmt, Stmt)
+        self.append_child(stmt, FunctionDef.Roles.Body)
+
+    def accept(self, visitor: "Visitor"):
+        self._dispatch_visit(visitor.visit_functiondef)
+
+
+class ClassDef(BlockStmt):
+    class Roles:
+        Bases = Role("ClassDef.Bases")
+        Body = Role("ClassDef.Body")
+
+    def __init__(self, name: str, *body: Stmt, bases: Union[TypeRef, Iterable[TypeRef]] = ()):
+        super().__init__()
+        self.name = name
+        self.append_children(bases, ClassDef.Roles.Bases)
+        self.append_children(body, ClassDef.Roles.Body)
+
+    @property
+    def bases(self) -> Iterable[TypeRef]:
+        return self.get_children_in_role(ClassDef.Roles.Bases)
+
+    def append_base(self, base: TypeRef):
+        _assert_instance(base, TypeRef)
+        self.append_child(base, ClassDef.Roles.Bases)
+
+    @property
+    def body(self) -> Iterable[Stmt]:
+        return self.get_children_in_role(ClassDef.Roles.Body)
+
+    def make_typeref(self) -> TypeRef:
+        return TypeRef(self.parent.qual_name if self.parent else None, self.name)
+
+    def append_body(self, stmt: Stmt):
+        _assert_instance(stmt, Stmt)
+        self.append_child(stmt, ClassDef.Roles.Body)
+
+    def accept(self, visitor: 
"Visitor"): + self._dispatch_visit(visitor.visit_classdef) + + +class Return(Stmt): + class Roles: + Expr = Role("Return.Expr") + + def __init__(self, expr: Expr): + super().__init__() + self.append_child(expr, Return.Roles.Expr) + + @property + def expr(self): + return self._get_single_child(Return.Roles.Expr) + + def accept(self, visitor: "Visitor"): + self._dispatch_visit(visitor.visit_return) + + +class Assign(Stmt): + class Roles: + Target = Role("Assign.Target") + Value = Role("Assign.Value") + Type = Role("Assign.Type") + + def __init__(self, target: Expr, value: Expr, type: Optional[TypeRef] = None): + super().__init__() + self.target = target + self.value = value + self.type = type + + @property + def target(self) -> Optional[Expr]: + return self._get_single_child(Assign.Roles.Target) + + @target.setter + def target(self, expr: Optional[Expr]): + self._set_single_child(expr, Assign.Roles.Target) + + @property + def value(self) -> Optional[Expr]: + return self._get_single_child(Assign.Roles.Value) + + @value.setter + def value(self, expr: Optional[Expr]): + self._set_single_child(expr, Assign.Roles.Value) + + @property + def type(self) -> Optional[TypeRef]: + return self._get_single_child(Assign.Roles.Type) + + @type.setter + def type(self, expr: Optional[TypeRef]): + self._set_single_child(expr, Assign.Roles.Type) + + def accept(self, visitor: "Visitor"): + self._dispatch_visit(visitor.visit_assign) + + +class If(BlockStmt): + class Roles: + Condition = Role("If.Condition") + TrueBody = Role("If.TrueBody") + FalseBody = Role("If.FalseBody") + + def __init__( + self, + condition: Expr, + true_body: Iterable[Stmt], + false_body: Optional[Iterable[Stmt]] = None, + ): + super().__init__() + self.condition = condition + self.append_children(true_body, If.Roles.TrueBody) + self.append_children(false_body, If.Roles.FalseBody) + + @property + def condition(self) -> Optional[Expr]: + return self._get_single_child(If.Roles.Condition) + + @condition.setter + def condition(self, expr: Optional[Expr]): + self._set_single_child(expr, If.Roles.Condition) + + @property + def true_body(self) -> Iterable[Stmt]: + return self.get_children_in_role(If.Roles.TrueBody) + + @property + def false_body(self) -> Iterable[Stmt]: + return self.get_children_in_role(If.Roles.FalseBody) + + def accept(self, visitor: "Visitor"): + self._dispatch_visit(visitor.visit_if) + + +class Raise(Node): + class Roles: + Expr = Role("Raise.Expr") + + def __init__(self, expr: Expr): + super().__init__() + self.append_child(expr, Raise.Roles.Expr) + + @property + def expr(self): + return first(self.get_children_in_role(Raise.Roles.Expr)) + + def accept(self, visitor: "Visitor"): + self._dispatch_visit(visitor.visit_raise) + + +class Alias(Node): + def __init__(self, name: str, alias: Optional[str] = None): + super().__init__() + self.name = name + self.alias = alias + + def accept(self, visitor: "Visitor"): + self._dispatch_visit(visitor.visit_alias) + + +class ImportBase(Stmt, ABC): + class Roles: + Names = Role("ImportBase.Names") + + def __init__(self, *names: Alias): + super().__init__() + self.append_children(names, ImportBase.Roles.Names) + + @property + def names(self) -> Iterable[Alias]: + return self.get_children_in_role(ImportBase.Roles.Names) + + +class Import(ImportBase): + def accept(self, visitor: "Visitor"): + self._dispatch_visit(visitor.visit_import) + + +class ImportFrom(ImportBase): + def __init__(self, module: str, *names: Alias, level: Optional[int] = None): + super().__init__(*names) + self.module = 
module + self.level = level + + def accept(self, visitor: "Visitor"): + self._dispatch_visit(visitor.visit_importfrom) + + +class Module(Node): + class Roles: + Body = Role("Module.Body") + + def __init__(self, *body: Stmt, name: Optional[str] = None): + super().__init__() + self.name = name + self.append_children(body, Module.Roles.Body) + + @property + def body(self) -> Iterable[Stmt]: + return self.get_children_in_role(Module.Roles.Body) + + def append_body(self, *stmts: Node): + self.append_children(stmts, Module.Roles.Body) + + def accept(self, visitor: "Visitor"): + self._dispatch_visit(visitor.visit_module) + + +class VisitKind(Enum): + NONE = 0 + ENTER = 1 + LEAVE = 2 + + +class Visitor: + # pylint: disable=W0613 + + def __init__(self): + self.visit_kind = VisitKind.NONE + self.node_stack = [] + + def enter(self, node: Node): + self.visit_kind = VisitKind.ENTER + self.node_stack.append(node) + + def leave(self, node: Node): + self.visit_kind = VisitKind.LEAVE + + def finish(self, node: Node): + self.visit_kind = VisitKind.NONE + self.node_stack.pop() + + def visit_node(self, node: Node) -> Optional[bool]: + return True + + def visit_expr(self, expr: Expr) -> Optional[bool]: + return self.visit_node(expr) + + def visit_name(self, name: Name) -> Optional[bool]: + return self.visit_expr(name) + + def visit_constant(self, constant: Constant) -> Optional[bool]: + return self.visit_expr(constant) + + def visit_binop(self, binop: BinOp) -> Optional[bool]: + return self.visit_expr(binop) + + def visit_subscript(self, subscript: Subscript) -> Optional[bool]: + return self.visit_expr(subscript) + + def visit_starred(self, starred: Starred) -> Optional[bool]: + return self.visit_expr(starred) + + def visit_call(self, call: Call) -> Optional[bool]: + return self.visit_expr(call) + + def visit_lambda(self, lambda_: Lambda) -> Optional[bool]: + return self.visit_expr(lambda_) + + def visit_expr_list(self, expr_list: ExprList) -> Optional[bool]: + return self.visit_expr(expr_list) + + def visit_thunk_expr(self, thunk: ThunkExpr) -> Optional[bool]: + return self.visit_expr(thunk) + + def visit_tuple_expr(self, tuple: TupleExpr) -> Optional[bool]: + return self.visit_expr_list(tuple) + + def visit_list_expr(self, list: ListExpr) -> Optional[bool]: + return self.visit_expr_list(list) + + def visit_set_expr(self, set: SetExpr) -> Optional[bool]: + return self.visit_expr_list(set) + + def visit_dict_elem(self, elem: DictElem) -> Optional[bool]: + return self.visit_expr(elem) + + def visit_dict_expr(self, dict: DictExpr) -> Optional[bool]: + return self.visit_expr_list(dict) + + def visit_typeref(self, typeref: TypeRef) -> Optional[bool]: + return self.visit_expr(typeref) + + def visit_arg(self, arg: Arg) -> Optional[bool]: + return self.visit_node(arg) + + def visit_stmt(self, stmt: Stmt) -> Optional[bool]: + return self.visit_node(stmt) + + def visit_blockstmt(self, block: BlockStmt) -> Optional[bool]: + return self.visit_stmt(block) + + def visit_pass(self, pass_: Pass) -> Optional[bool]: + return self.visit_stmt(pass_) + + def visit_thunk_stmt(self, thunk: ThunkStmt) -> Optional[bool]: + return self.visit_stmt(thunk) + + def visit_functiondef(self, functiondef: FunctionDef) -> Optional[bool]: + return self.visit_stmt(functiondef) + + def visit_classdef(self, classdef: ClassDef) -> Optional[bool]: + return self.visit_stmt(classdef) + + def visit_return(self, return_: Return) -> Optional[bool]: + return self.visit_stmt(return_) + + def visit_assign(self, assign: Assign) -> Optional[bool]: + return 
self.visit_stmt(assign) + + def visit_if(self, if_: If) -> Optional[bool]: + return self.visit_stmt(if_) + + def visit_raise(self, raise_: Raise) -> Optional[bool]: + return self.visit_stmt(raise_) + + def visit_alias(self, alias: Alias) -> Optional[bool]: + return self.visit_node(alias) + + def visit_importbase(self, import_: ImportBase) -> Optional[bool]: + return self.visit_stmt(import_) + + def visit_import(self, import_: Import) -> Optional[bool]: + return self.visit_importbase(import_) + + def visit_importfrom(self, importfrom: ImportFrom) -> Optional[bool]: + return self.visit_importbase(importfrom) + + def visit_module(self, module: Module) -> Optional[bool]: + return self.visit_node(module) + + +class FixupVisitor(Visitor, ABC): + pass + + +class PopulateEmptyMemberBodies(FixupVisitor): + def visit_classdef(self, classdef: ClassDef) -> Optional[bool]: + if self.visit_kind is VisitKind.ENTER and not any(classdef.body): + classdef.append_child(Pass(), ClassDef.Roles.Body) + return True + + def visit_functiondef(self, functiondef: FunctionDef) -> Optional[bool]: + if self.visit_kind is VisitKind.ENTER and not any(functiondef.body): + functiondef.append_child(Pass(), FunctionDef.Roles.Body) + return True + + +class NameCollector(Visitor): + def __init__(self, predicate: NodePredicate): + super().__init__() + _assert_instance(predicate, NodePredicate) + self._predicate = predicate + self.names: Set[str] = set() + + def leave(self, node: Node) -> Optional[bool]: + if self._predicate.matches(node) and hasattr(node, "name"): + self.names.add(node.name) + + +class ImportAdjuster(FixupVisitor): + def __init__(self): + super().__init__() + self.naming_conflicts: Set[str] = set() + + def enter(self, node: Node): + if len(self.node_stack) == 0: + collector = NameCollector( + NodePredicate(func=lambda n: isinstance(n, (ClassDef, FunctionDef))) + ) + node.accept(collector) + self.naming_conflicts = collector.names + super().enter(node) + + def leave(self, node: Node): + super().leave(node) + if len(self.node_stack) == 0: + self.naming_conflicts = set() + + def visit_typeref(self, typeref: TypeRef) -> Optional[bool]: + if self.visit_kind is not VisitKind.ENTER or not typeref.module: + return True + + module = first_or_none(typeref.get_ancestors_of_type(Module)) + if module is None: + return True + + def adjust_typeref(import_alias: Optional[str]): + typeref.module = None + if import_alias: + typeref.name = import_alias + + import_from: ImportFrom = None + + # Reuse an existing import if we have one; if so, + # and the imported name is already specified, return + # early as there's nothing to import. In that case, also + # adjust the typeref if the import is aliased due to + # conflict resolution below from a previous pass. + for import_ in filter( + lambda i: i.module == typeref.module, module.get_children_of_type(ImportFrom) + ): + import_from = import_ + for imported_name in filter( + lambda i: i.name in (typeref.name, typeref.name), import_.names + ): + adjust_typeref(imported_name.alias) + return True + + # See if the type name conflicts with other names in the + # module (class and function names). If so, adjust the + # name to create an alias on the import. This rewrites + # conflicts like: + # from typing import Optional + # def Optional(thing: Optional[str]): ... + # To: + # from typing import Optional as _Optional + # def Optional(thing: _Optional[str]): ... 
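+        # The loop below retries with one more leading underscore on each
+        # pass, so a module that (hypothetically) already defined both
+        # Optional and _Optional would import `Optional as __Optional`.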
+ conflict_alias = typeref.name + while conflict_alias in self.naming_conflicts: + conflict_alias = f"_{conflict_alias}" + if conflict_alias == typeref.name: + import_alias = Alias(typeref.name) + else: + import_alias = Alias(typeref.name, conflict_alias) + + # Expand or create the import + if import_from is None: + module.prepend_child(ImportFrom(typeref.module, import_alias), Module.Roles.Body) + else: + import_from.append_child(import_alias, ImportBase.Roles.Names) + + adjust_typeref(conflict_alias) + return True + + +class NodeWriterOptions: + def __init__(self, indent=" ", newline="\n", insert_final_newline=True): + self.indent = indent + self.newline = newline + self.insert_final_newline = insert_final_newline + + +class NodeWriter(Visitor, ABC): + def __init__(self, stream: TextIO, options: Optional[NodeWriterOptions] = None): + super().__init__() + self._stream = stream + self._options = options or NodeWriterOptions() + self._indent_level = 0 + self._last_char = "" + + def enter(self, node: Node): + super().enter(node) + if node.leading_trivia: + self.write(node.leading_trivia) + + def finish(self, node: Node): + super().finish(node) + if node.trailing_trivia: + self.write(node.trailing_trivia) + if self._options.insert_final_newline and len(self.node_stack) == 0: + self.write("\n") + + def indent(self): + self._indent_level += 1 + + def dedent(self): + self._indent_level -= 1 + + def write_indent(self): + self._stream.write(self._options.indent * self._indent_level) + + def _raw_write(self, str: str): + if len(str) > 0: + if self._options.newline != "\n": + self._stream.write(str.replace("\n", self._options.newline)) + else: + self._stream.write(str) + self._last_char = str[-1] + + def write(self, *texts: str, separator: str = "", allow_empty_text: bool = False): + for i, text in enumerate(texts): + if not allow_empty_text and len(text) == 0: + continue + if self._last_char == "\n": + self.write_indent() + if i > 0: + self._raw_write(separator) + if separator == "\n": + self.write_indent() + self._raw_write(text) + + def dispatch_write( + self, + separator: Union[str, Callable[[Node], str]], + nodes: Iterable[Node], + prefix: str = "", + suffix: str = "", + ): + self.write(prefix) + for i, node in enumerate(nodes): + if i > 0: + if callable(separator): + self.write(separator(node)) + else: + self.write(separator) + node.accept(self) + self.write(suffix) + + +class PythonWriter(NodeWriter): + def visit_node(self, node: Node) -> Optional[bool]: + raise NotImplementedError(f"no visitor for node {node}") + + def visit_module(self, module: Module): + def sep(node: Node): + node_is_block = isinstance(node, BlockStmt) + prev_is_block = isinstance(node.prev_sibling, BlockStmt) + if prev_is_block or (node_is_block and not prev_is_block): + return "\n\n\n" + else: + return "\n" + + self.dispatch_write(sep, module.body) + + def visit_alias(self, alias: Alias): + self.write(alias.name) + if alias.alias: + self.write(" as ") + self.write(alias.alias) + + def visit_import(self, import_: Import): + self.write("import ") + self.dispatch_write(", ", import_.names) + + def visit_importfrom(self, importfrom: ImportFrom): + self.write(f"from {importfrom.module} import ") + self.dispatch_write(", ", importfrom.names) + + def visit_typeref(self, typeref: TypeRef): + if typeref.module and len(typeref.module) > 0: + self.write(typeref.module) + self.write(".") + self.write(typeref.name) + if any(typeref.typeargs): + self.write("[") + self.dispatch_write(", ", typeref.typeargs) + self.write("]") + + def 
visit_arg(self, arg: Arg): + if arg.is_vararg: + self.write("*") + self.write(arg.name) + if arg.type: + self.write(": ") + arg.type.accept(self) + if arg.default_value: + self.write(" = ") + arg.default_value.accept(self) + + def visit_thunk_expr(self, thunk: ThunkExpr): + self.write(thunk.code) + + def visit_name(self, name: Name): + self.write(name.identifier) + + def visit_constant(self, constant: Constant): + self.write( + repr(constant.value) if isinstance(constant.value, str) else str(constant.value) + ) + + def visit_binop(self, binop: BinOp): + binop.left.accept(self) + self.write(f" {binop.op} ") + binop.right.accept(self) + + def visit_subscript(self, subscript: Subscript): + subscript.value.accept(self) + self.write("[") + subscript.slice.accept(self) + self.write("]") + + def visit_starred(self, starred: Starred): + self.write("*") + starred.expr.accept(self) + + def visit_call(self, call: Call): + call.func.accept(self) + self.dispatch_write(", ", call.args, prefix="(", suffix=")") + + def visit_lambda(self, lambda_: Lambda): + self.write("lambda ") + self.dispatch_write(", ", lambda_.args) + self.write(": ") + lambda_.body.accept(self) + + def visit_tuple_expr(self, tuple: TupleExpr): + self.dispatch_write(", ", tuple.elements, prefix="(", suffix=",)") + + def visit_list_expr(self, list: ListExpr): + self.dispatch_write(", ", list.elements, prefix="[", suffix="]") + + def visit_set_expr(self, set: ListExpr): + self.dispatch_write(", ", set.elements, prefix="{", suffix="}") + + def visit_dict_elem(self, elem: DictElem): + elem.key.accept(self) + self.write(": ") + elem.value.accept(self) + + def visit_dict_expr(self, dict: DictExpr): + self.dispatch_write(", ", dict.elements, prefix="{", suffix="}") + + def visit_pass(self, pass_: Pass): + self.write("pass") + + def visit_thunk_stmt(self, thunk: ThunkStmt) -> bool: + if self.visit_kind == VisitKind.ENTER and thunk.thunk: + lines = dedent(thunk.thunk).splitlines() + self.write(*lines, separator="\n", allow_empty_text=True) + if thunk.next_sibling: + self.write("\n") + return True + + def visit_assign(self, assign: Assign): + assign.target.accept(self) + if assign.type: + self.write(": ") + assign.type.accept(self) + self.write(" = ") + assign.value.accept(self) + + def visit_if(self, if_: If): + self.write("if ") + if_.condition.accept(self) + self.write(":\n") + self.indent() + self.dispatch_write("\n", if_.true_body) + self.dedent() + if first_or_none(if_.false_body) is not None: + self.write("else:\n") + self.indent() + self.dispatch_write("\n", if_.false_body) + self.dedent() + + def visit_raise(self, raise_: Raise): + self.write("raise ") + raise_.expr.accept(self) + + def visit_functiondef(self, functiondef: FunctionDef): + self.write("def ", functiondef.name, "(") + self.dispatch_write(", ", functiondef.args) + self.write(")") + if functiondef.return_type: + self.write(" -> ") + functiondef.return_type.accept(self) + self.write(":\n") + self.indent() + if functiondef.doc: + self.write('r"""') + for line in dedent(functiondef.doc).splitlines(): + self.write(line) + self.write("\n") + self.write('"""\n\n') + self.dispatch_write("\n", functiondef.body) + self.dedent() + + def visit_classdef(self, classdef: ClassDef): + self.write("class ", classdef.name) + if any(classdef.bases): + self.write("(") + self.dispatch_write(", ", classdef.bases) + self.write(")") + self.write(":\n") + self.indent() + self.dispatch_write("\n\n", classdef.body) + self.dedent() + + def visit_return(self, return_: Return): + self.write("return ") + 
return_.expr.accept(self) + + +class DocCommentBuilder(Visitor): + def __init__(self, width: int = 80): + super().__init__() + self.width = width + + def visit_functiondef(self, functiondef: FunctionDef): + def wrap(text: str, initial_indent="", subsequent_indent=""): + return TextWrapper( + width=self.width, + initial_indent=initial_indent, + subsequent_indent=subsequent_indent, + expand_tabs=False, + replace_whitespace=False, + fix_sentence_endings=False, + break_long_words=False, + break_on_hyphens=False, + ).fill(text) + + argsdoc = "" + for arg in functiondef.args: + if arg.doc: + argsdoc += wrap(f"{arg.name}: {arg.doc}", " " * 4, " " * 8) + "\n\n" + if argsdoc: + functiondef.doc += "\n\nArgs:\n" + argsdoc + + if functiondef.doc: + functiondef.doc = functiondef.doc.strip()
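
As a closing illustration of how the pieces above compose (a minimal sketch, not part of the diff; the `hypot` module content is invented for demonstration): nodes from opgen/pygen.py are assembled into a Module and rendered through PythonWriter, which is the same path OpsetsBuilder._write_module takes for the generated opset files.

import io

import opgen.pygen as cg

# Build a module equivalent to:
#
#     from math import sqrt
#
#     def hypot(a: float, b: float) -> float:
#         return sqrt(a * a + b * b)
module = cg.Module(
    cg.ImportFrom("math", cg.Alias("sqrt")),
    cg.FunctionDef(
        "hypot",
        cg.Arg("a", type=cg.FloatTypeRef()),
        cg.Arg("b", type=cg.FloatTypeRef()),
        return_type=cg.FloatTypeRef(),
        body=[
            cg.Return(
                cg.Call(
                    cg.Name("sqrt"),
                    cg.BinOp(
                        cg.BinOp(cg.Name("a"), "*", cg.Name("a")),
                        "+",
                        cg.BinOp(cg.Name("b"), "*", cg.Name("b")),
                    ),
                )
            )
        ],
    ),
    name="example",
)

# PythonWriter renders the tree; visit_module separates the import from the
# function with two blank lines because FunctionDef is a BlockStmt.
buffer = io.StringIO()
module.accept(cg.PythonWriter(buffer))
print(buffer.getvalue())

Running the generator end to end via `python -m opgen` builds this kind of tree for every opset module and then formats the written files with black and isort, as shown in opgen/__main__.py above.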