From 66233c46d8409b08b8e21e1d739f4ee05377f083 Mon Sep 17 00:00:00 2001 From: isdanni Date: Wed, 1 May 2024 21:04:42 -0500 Subject: [PATCH] Add Swish operator Signed-off-by: isdanni --- docs/Changelog.md | 2371 +---------------- docs/Operators.md | 423 ++- docs/TestCoverage.md | 18 +- onnx/backend/test/case/node/swish.py | 29 + .../test/data/node/test_acos/model.onnx | Bin 99 -> 99 bytes .../data/node/test_acos_example/model.onnx | Bin 91 -> 91 bytes .../test/data/node/test_acosh/model.onnx | Bin 101 -> 101 bytes .../data/node/test_acosh_example/model.onnx | Bin 93 -> 93 bytes .../test/data/node/test_asin/model.onnx | Bin 99 -> 99 bytes .../data/node/test_asin_example/model.onnx | Bin 91 -> 91 bytes .../test/data/node/test_asinh/model.onnx | Bin 101 -> 101 bytes .../data/node/test_asinh_example/model.onnx | Bin 93 -> 93 bytes .../test/data/node/test_atan/model.onnx | Bin 99 -> 99 bytes .../data/node/test_atan_example/model.onnx | Bin 91 -> 91 bytes .../test/data/node/test_atanh/model.onnx | Bin 101 -> 101 bytes .../data/node/test_atanh_example/model.onnx | Bin 93 -> 93 bytes .../test_averagepool_1d_default/model.onnx | Bin 145 -> 145 bytes .../node/test_averagepool_2d_ceil/model.onnx | Bin 189 -> 189 bytes .../test_averagepool_2d_default/model.onnx | Bin 156 -> 156 bytes .../test_averagepool_2d_dilations/model.onnx | Bin 214 -> 214 bytes .../node/test_averagepool_2d_pads/model.onnx | Bin 172 -> 172 bytes .../model.onnx | Bin 216 -> 216 bytes .../model.onnx | Bin 184 -> 184 bytes .../model.onnx | Bin 228 -> 228 bytes .../model.onnx | Bin 216 -> 216 bytes .../model.onnx | Bin 186 -> 186 bytes .../test_averagepool_2d_same_lower/model.onnx | Bin 186 -> 186 bytes .../test_averagepool_2d_same_upper/model.onnx | Bin 186 -> 186 bytes .../test_averagepool_2d_strides/model.onnx | Bin 174 -> 174 bytes .../test_averagepool_3d_default/model.onnx | Bin 166 -> 166 bytes .../model.onnx | Bin 303 -> 303 bytes .../model.onnx | Bin 302 -> 302 bytes .../model.onnx | Bin 303 -> 303 
bytes .../model.onnx | Bin 302 -> 302 bytes .../model.onnx | Bin 234 -> 234 bytes .../test_basic_conv_with_padding/model.onnx | Bin 201 -> 201 bytes .../model.onnx | Bin 204 -> 204 bytes .../model.onnx | Bin 308 -> 308 bytes .../model.onnx | Bin 323 -> 323 bytes .../test/data/node/test_bernoulli/model.onnx | Bin 93 -> 93 bytes .../node/test_bernoulli_double/model.onnx | Bin 114 -> 114 bytes .../test_bernoulli_double_expanded/model.onnx | Bin 446 -> 446 bytes .../node/test_bernoulli_expanded/model.onnx | Bin 409 -> 409 bytes .../data/node/test_bernoulli_seed/model.onnx | Bin 114 -> 114 bytes .../test_bernoulli_seed_expanded/model.onnx | Bin 452 -> 452 bytes .../test_conv_with_autopad_same/model.onnx | Bin 226 -> 226 bytes .../model.onnx | Bin 236 -> 236 bytes .../model.onnx | Bin 224 -> 224 bytes .../test_conv_with_strides_padding/model.onnx | Bin 221 -> 221 bytes .../data/node/test_convtranspose/model.onnx | Bin 158 -> 158 bytes .../node/test_convtranspose_1d/model.onnx | Bin 148 -> 148 bytes .../node/test_convtranspose_3d/model.onnx | Bin 173 -> 173 bytes .../model.onnx | Bin 216 -> 216 bytes .../test_convtranspose_dilations/model.onnx | Bin 188 -> 188 bytes .../model.onnx | Bin 266 -> 266 bytes .../model.onnx | Bin 212 -> 212 bytes .../node/test_convtranspose_pad/model.onnx | Bin 205 -> 205 bytes .../node/test_convtranspose_pads/model.onnx | Bin 200 -> 200 bytes .../test/data/node/test_cos/model.onnx | Bin 97 -> 97 bytes .../data/node/test_cos_example/model.onnx | Bin 89 -> 89 bytes .../test/data/node/test_cosh/model.onnx | Bin 99 -> 99 bytes .../data/node/test_cosh_example/model.onnx | Bin 91 -> 91 bytes .../model.onnx | Bin 310 -> 310 bytes .../model.onnx | Bin 286 -> 286 bytes .../test_data_set_0/input_2.pb | 3 +- .../test_data_set_0/input_2.pb | 3 +- .../test/data/node/test_det_2d/model.onnx | Bin 84 -> 84 bytes .../test/data/node/test_det_nd/model.onnx | Bin 92 -> 92 bytes .../data/node/test_dropout_default/model.onnx | Bin 126 -> 126 bytes 
.../node/test_dropout_default_mask/model.onnx | Bin 160 -> 160 bytes .../model.onnx | Bin 182 -> 182 bytes .../test_dropout_default_ratio/model.onnx | Bin 148 -> 148 bytes .../test/data/node/test_elu/model.onnx | Bin 114 -> 114 bytes .../data/node/test_elu_default/model.onnx | Bin 105 -> 105 bytes .../data/node/test_elu_example/model.onnx | Bin 106 -> 106 bytes .../model.onnx | Bin 148 -> 148 bytes .../node/test_eyelike_with_dtype/model.onnx | Bin 122 -> 122 bytes .../test_eyelike_without_dtype/model.onnx | Bin 111 -> 111 bytes .../node/test_globalaveragepool/model.onnx | Bin 133 -> 133 bytes .../model.onnx | Bin 145 -> 145 bytes .../data/node/test_globalmaxpool/model.onnx | Bin 125 -> 125 bytes .../test_globalmaxpool_precomputed/model.onnx | Bin 137 -> 137 bytes .../test/data/node/test_gridsample/model.onnx | Bin 225 -> 225 bytes .../model.onnx | Bin 217 -> 217 bytes .../node/test_gridsample_bicubic/model.onnx | Bin 184 -> 184 bytes .../model.onnx | Bin 235 -> 235 bytes .../model.onnx | Bin 235 -> 235 bytes .../node/test_gridsample_bilinear/model.onnx | Bin 186 -> 186 bytes .../model.onnx | Bin 237 -> 237 bytes .../model.onnx | Bin 237 -> 237 bytes .../test_gridsample_border_padding/model.onnx | Bin 200 -> 200 bytes .../node/test_gridsample_nearest/model.onnx | Bin 186 -> 186 bytes .../model.onnx | Bin 237 -> 237 bytes .../model.onnx | Bin 237 -> 237 bytes .../model.onnx | Bin 208 -> 208 bytes .../model.onnx | Bin 247 -> 247 bytes .../model.onnx | Bin 247 -> 247 bytes .../model.onnx | Bin 247 -> 247 bytes .../model.onnx | Bin 247 -> 247 bytes .../test_gridsample_zeros_padding/model.onnx | Bin 198 -> 198 bytes .../data/node/test_gru_batchwise/model.onnx | Bin 235 -> 235 bytes .../data/node/test_gru_defaults/model.onnx | Bin 189 -> 189 bytes .../data/node/test_gru_seq_length/model.onnx | Bin 215 -> 215 bytes .../test_gru_with_initial_bias/model.onnx | Bin 222 -> 222 bytes .../data/node/test_hardsigmoid/model.onnx | Bin 146 -> 146 bytes 
.../node/test_hardsigmoid_default/model.onnx | Bin 121 -> 121 bytes .../node/test_hardsigmoid_example/model.onnx | Bin 138 -> 138 bytes .../test/data/node/test_hardswish/model.onnx | Bin 109 -> 109 bytes .../node/test_hardswish_expanded/model.onnx | Bin 266 -> 266 bytes .../test_data_set_0/input_0.pb | Bin 1901 -> 1901 bytes .../node/test_instancenorm_epsilon/model.onnx | Bin 206 -> 206 bytes .../node/test_instancenorm_example/model.onnx | Bin 187 -> 187 bytes .../node/test_lppool_1d_default/model.onnx | Bin 162 -> 162 bytes .../node/test_lppool_2d_default/model.onnx | Bin 156 -> 156 bytes .../node/test_lppool_2d_dilations/model.onnx | Bin 196 -> 196 bytes .../data/node/test_lppool_2d_pads/model.onnx | Bin 172 -> 172 bytes .../node/test_lppool_2d_same_lower/model.onnx | Bin 186 -> 186 bytes .../node/test_lppool_2d_same_upper/model.onnx | Bin 186 -> 186 bytes .../node/test_lppool_2d_strides/model.onnx | Bin 174 -> 174 bytes .../node/test_lppool_3d_default/model.onnx | Bin 166 -> 166 bytes .../data/node/test_lstm_batchwise/model.onnx | Bin 237 -> 237 bytes .../data/node/test_lstm_defaults/model.onnx | Bin 191 -> 191 bytes .../test_lstm_with_initial_bias/model.onnx | Bin 224 -> 224 bytes .../node/test_lstm_with_peepholes/model.onnx | Bin 377 -> 377 bytes .../node/test_maxpool_1d_default/model.onnx | Bin 137 -> 137 bytes .../data/node/test_maxpool_2d_ceil/model.onnx | Bin 181 -> 181 bytes .../model.onnx | Bin 207 -> 207 bytes .../node/test_maxpool_2d_default/model.onnx | Bin 147 -> 147 bytes .../node/test_maxpool_2d_dilations/model.onnx | Bin 188 -> 188 bytes .../data/node/test_maxpool_2d_pads/model.onnx | Bin 164 -> 164 bytes .../model.onnx | Bin 176 -> 176 bytes .../model.onnx | Bin 208 -> 208 bytes .../model.onnx | Bin 178 -> 178 bytes .../test_maxpool_2d_same_lower/model.onnx | Bin 178 -> 178 bytes .../test_maxpool_2d_same_upper/model.onnx | Bin 178 -> 178 bytes .../node/test_maxpool_2d_strides/model.onnx | Bin 166 -> 166 bytes 
.../node/test_maxpool_2d_uint8/model.onnx | Bin 165 -> 165 bytes .../node/test_maxpool_3d_default/model.onnx | Bin 158 -> 158 bytes .../node/test_maxpool_3d_dilations/model.onnx | Bin 202 -> 202 bytes .../model.onnx | Bin 215 -> 215 bytes .../model.onnx | Bin 239 -> 239 bytes .../model.onnx | Bin 220 -> 220 bytes .../model.onnx | Bin 244 -> 244 bytes .../model.onnx | Bin 262 -> 262 bytes .../model.onnx | Bin 223 -> 223 bytes .../test/data/node/test_mish/model.onnx | Bin 85 -> 85 bytes .../data/node/test_mish_expanded/model.onnx | Bin 309 -> 309 bytes .../test/data/node/test_nllloss_NC/model.onnx | Bin 181 -> 181 bytes .../node/test_nllloss_NC_expanded/model.onnx | Bin 1487 -> 1487 bytes .../data/node/test_nllloss_NCd1/model.onnx | Bin 187 -> 187 bytes .../test_nllloss_NCd1_expanded/model.onnx | Bin 1702 -> 1702 bytes .../data/node/test_nllloss_NCd1_ii/model.onnx | Bin 211 -> 211 bytes .../test_nllloss_NCd1_ii_expanded/model.onnx | Bin 4799 -> 4799 bytes .../model.onnx | Bin 271 -> 271 bytes .../model.onnx | Bin 6057 -> 6057 bytes .../node/test_nllloss_NCd1_weight/model.onnx | Bin 224 -> 224 bytes .../model.onnx | Bin 2696 -> 2696 bytes .../test_nllloss_NCd1_weight_ii/model.onnx | Bin 248 -> 248 bytes .../model.onnx | Bin 5288 -> 5288 bytes .../data/node/test_nllloss_NCd1d2/model.onnx | Bin 209 -> 209 bytes .../test_nllloss_NCd1d2_expanded/model.onnx | Bin 1579 -> 1579 bytes .../model.onnx | Bin 246 -> 246 bytes .../model.onnx | Bin 6244 -> 6244 bytes .../model.onnx | Bin 212 -> 212 bytes .../model.onnx | Bin 2033 -> 2033 bytes .../model.onnx | Bin 210 -> 210 bytes .../model.onnx | Bin 2013 -> 2013 bytes .../model.onnx | Bin 251 -> 251 bytes .../model.onnx | Bin 2211 -> 2211 bytes .../model.onnx | Bin 254 -> 254 bytes .../model.onnx | Bin 3324 -> 3324 bytes .../model.onnx | Bin 252 -> 252 bytes .../model.onnx | Bin 2730 -> 2730 bytes .../model.onnx | Bin 276 -> 276 bytes .../model.onnx | Bin 5854 -> 5854 bytes .../model.onnx | Bin 280 -> 280 bytes .../model.onnx | 
Bin 5510 -> 5510 bytes .../model.onnx | Bin 252 -> 252 bytes .../model.onnx | Bin 5445 -> 5445 bytes .../model.onnx | Bin 269 -> 269 bytes .../model.onnx | Bin 3092 -> 3092 bytes .../model.onnx | Bin 266 -> 266 bytes .../model.onnx | Bin 1974 -> 1974 bytes .../test_data_set_0/input_2.pb | Bin 21 -> 19 bytes .../test_data_set_0/input_2.pb | Bin 21 -> 19 bytes .../test_data_set_0/input_2.pb | 3 +- .../test_data_set_0/input_2.pb | 3 +- .../data/node/test_rnn_seq_length/model.onnx | Bin 215 -> 215 bytes .../test_roialign_aligned_false/model.onnx | Bin 352 -> 352 bytes .../test_roialign_aligned_true/model.onnx | Bin 344 -> 344 bytes .../node/test_roialign_mode_max/model.onnx | Bin 363 -> 363 bytes .../test/data/node/test_round/model.onnx | Bin 85 -> 85 bytes .../test/data/node/test_selu/model.onnx | Bin 133 -> 133 bytes .../data/node/test_selu_default/model.onnx | Bin 107 -> 107 bytes .../data/node/test_selu_example/model.onnx | Bin 125 -> 125 bytes .../node/test_simple_rnn_batchwise/model.onnx | Bin 242 -> 242 bytes .../node/test_simple_rnn_defaults/model.onnx | Bin 196 -> 196 bytes .../model.onnx | Bin 229 -> 229 bytes .../test/data/node/test_sin/model.onnx | Bin 97 -> 97 bytes .../data/node/test_sin_example/model.onnx | Bin 89 -> 89 bytes .../test/data/node/test_sinh/model.onnx | Bin 99 -> 99 bytes .../data/node/test_sinh_example/model.onnx | Bin 91 -> 91 bytes .../test/data/node/test_softplus/model.onnx | Bin 107 -> 107 bytes .../node/test_softplus_example/model.onnx | Bin 99 -> 99 bytes .../test/data/node/test_softsign/model.onnx | Bin 107 -> 107 bytes .../node/test_softsign_example/model.onnx | Bin 99 -> 99 bytes .../test/data/node/test_tan/model.onnx | Bin 97 -> 97 bytes .../data/node/test_tan_example/model.onnx | Bin 89 -> 89 bytes .../data/node/test_thresholdedrelu/model.onnx | Bin 138 -> 138 bytes .../test_thresholdedrelu_default/model.onnx | Bin 129 -> 129 bytes .../test_thresholdedrelu_example/model.onnx | Bin 130 -> 130 bytes 
.../node/test_training_dropout/model.onnx | Bin 160 -> 160 bytes .../test_training_dropout_default/model.onnx | Bin 168 -> 168 bytes .../model.onnx | Bin 201 -> 201 bytes .../test_training_dropout_mask/model.onnx | Bin 193 -> 193 bytes .../model.onnx | Bin 171 -> 171 bytes .../model.onnx | Bin 204 -> 204 bytes onnx/defs/math/defs.cc | 29 + onnx/defs/operator_sets.h | 2 + .../automatic_upgrade_test.py | 3 + 220 files changed, 271 insertions(+), 2616 deletions(-) create mode 100644 onnx/backend/test/case/node/swish.py diff --git a/docs/Changelog.md b/docs/Changelog.md index 15211ad1602..8f253ed0edc 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -4629,7 +4629,7 @@ This version of the operator has been available since version 2 of the default O #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -10750,7 +10750,7 @@ This version of the operator has been available since version 11 of the default #### Type Constraints
-
T : tensor(float16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -14497,7 +14497,7 @@ This version of the operator has been available since version 12 of the default ``` output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1) ``` - if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`. + if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`. Sliding windows that would start in the right padded region are ignored. `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following when ceil_mode is enabled: ``` @@ -15564,7 +15564,7 @@ This version of the operator has been available since version 13 of the default #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double), tensor(bfloat16)
Constrain input and output types to float tensors.
@@ -18593,7 +18593,7 @@ This version of the operator has been available since version 13 of the default #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double), tensor(bfloat16)
Constrain input and output types to float tensors.
@@ -22627,7 +22627,7 @@ This version of the operator has been available since version 18 of the default ``` output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1) ``` - if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`. + if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`. Sliding windows that would start in the right padded region are ignored. `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following when ceil_mode is enabled: ``` @@ -22955,7 +22955,7 @@ This version of the operator has been available since version 19 of the default
axis : int (default is 1)
-
(Optional) The axis of the dequantizing dimension of the input tensor. Used only for per-axis quantization. Negative value means counting dimensions from the back. Accepted range is `[-r, r-1]` where `r = rank(input)`. When the rank of the input is 1, per-tensor quantization is applied, rendering the axis unnecessary in this scenario.
+
(Optional) The axis of the dequantizing dimension of the input tensor. Ignored for per-tensor quantization. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input).
#### Inputs (2 - 3) @@ -24673,9 +24673,9 @@ This version of the operator has been available since version 21 of the default #### Type Constraints
-
T1 : tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz), tensor(uint4), tensor(int4)
+
T1 : tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string), tensor(bfloat16), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz), tensor(uint4), tensor(int4)
Constrain input types. Casting from complex is not supported.
-
T2 : tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz), tensor(uint4), tensor(int4)
+
T2 : tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string), tensor(bfloat16), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz), tensor(uint4), tensor(int4)
Constrain output types. Casting to complex is not supported.
@@ -25406,7 +25406,7 @@ This version of the operator has been available since version 21 of the default
axis : int (default is 1)
-
(Optional) The axis of the dequantizing dimension of the input tensor. Used only for per-axis and blocked quantization. Negative value means counting dimensions from the back. Accepted range is `[-r, r-1]` where `r = rank(input)`. When the rank of the input is 1, per-tensor quantization is applied, rendering the axis unnecessary in this scenario.
+
(Optional) The axis of the dequantizing dimension of the input tensor. Used for per-axis and blocked quantization. Negative value means counting dimensions from the back. Accepted range is `[-r, r-1]` where `r = rank(input)`.
block_size : int (default is 0)
(Optional) The size of the quantization block (number of times every scale is replicated). Used only for blocked quantization. The block size is a positive integer. Given `x` shape `(D0, ..., Di, ..., Dn)`, `y_scale` shape `(S0, ... Si, ...Sn)` and `axis=i`, the accepted range is `[ceil(Di/Si), ceil(Di/(Si-1))-1]`
output_dtype : int (default is 0)
@@ -25878,2357 +25878,6 @@ This version of the operator has been available since version 21 of the default
Constrain input and output types to all tensor types up to IRv10.
-## Version 22 of the default ONNX operator set -### **Acos-22** - - Calculates the arccosine (inverse of cosine) of the given input tensor, element-wise. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Inputs - -
-
input (differentiable) : T
-
Input tensor
-
- -#### Outputs - -
-
output (differentiable) : T
-
The arccosine of the input tensor computed element-wise
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **Acosh-22** - - Calculates the hyperbolic arccosine of the given input tensor element-wise. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Inputs - -
-
input (differentiable) : T
-
Input tensor
-
- -#### Outputs - -
-
output (differentiable) : T
-
The hyperbolic arccosine values of the input tensor computed element-wise
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **Asin-22** - - Calculates the arcsine (inverse of sine) of the given input tensor, element-wise. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Inputs - -
-
input (differentiable) : T
-
Input tensor
-
- -#### Outputs - -
-
output (differentiable) : T
-
The arcsine of the input tensor computed element-wise
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **Asinh-22** - - Calculates the hyperbolic arcsine of the given input tensor element-wise. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Inputs - -
-
input (differentiable) : T
-
Input tensor
-
- -#### Outputs - -
-
output (differentiable) : T
-
The hyperbolic arcsine values of the input tensor computed element-wise
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **Atan-22** - - Calculates the arctangent (inverse of tangent) of the given input tensor, element-wise. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Inputs - -
-
input (differentiable) : T
-
Input tensor
-
- -#### Outputs - -
-
output (differentiable) : T
-
The arctangent of the input tensor computed element-wise
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **Atanh-22** - - Calculates the hyperbolic arctangent of the given input tensor element-wise. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Inputs - -
-
input (differentiable) : T
-
Input tensor
-
- -#### Outputs - -
-
output (differentiable) : T
-
The hyperbolic arctangent values of the input tensor computed element-wise
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **AveragePool-22** - - AveragePool consumes an input tensor X and applies average pooling across - the tensor according to kernel sizes, stride sizes, and pad lengths. - average pooling consisting of computing the average on all values of a - subset of the input tensor according to the kernel size and downsampling the - data into the output tensor Y for further processing. The output spatial shape is calculated differently - depending on whether explicit padding is used, where pads is employed, or auto padding is used, where auto_pad is utilized. - With explicit padding (https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html?highlight=maxpool#torch.nn.MaxPool2d): - ``` - output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1) - ``` - or - ``` - output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1) - ``` - if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`. Sliding windows that would start in the right padded region are ignored. - - `auto_pad` is a DEPRECATED attribute. 
If you are using them currently, the output spatial shape will be following when ceil_mode is enabled: - ``` - VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i]) - SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i]) - ``` - or when ceil_mode is disabled (https://www.tensorflow.org/api_docs/python/tf/keras/layers/AveragePooling2D): - ``` - VALID: output_spatial_shape[i] = floor((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i]) + 1 - SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = floor((input_spatial_shape[i] - 1) / strides_spatial_shape[i]) + 1 - ``` - And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`: - ``` - pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i] - ``` - The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero). - - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
auto_pad : string (default is NOTSET)
-
auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = ceil(input_shape[i] / strides[i])` for each axis `i`. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER.
-
ceil_mode : int (default is 0)
-
Whether to use ceil or floor (default) to compute the output shape.
-
count_include_pad : int (default is 0)
-
Whether include pad pixels when calculating values for the edges. Default is 0, doesn't count include pad.
-
dilations : list of ints
-
Dilation value along each spatial axis of filter. If not present, the dilation defaults to 1 along each spatial axis.
-
kernel_shape : list of ints (required)
-
The size of the kernel along each axis.
-
pads : list of ints
-
Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis.
-
strides : list of ints
-
Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis.
-
- -#### Inputs - -
-
X (differentiable) : T
-
Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...].
-
- -#### Outputs - -
-
Y (differentiable) : T
-
Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **Bernoulli-22** - - Draws binary random numbers (0 or 1) from a Bernoulli distribution. The input tensor should be a tensor - containing probabilities p (a value in the range [0,1]) to be used for drawing the binary random number, - where an output of 1 is produced with probability p and an output of 0 is produced with probability (1-p). - - This operator is non-deterministic and may not produce the same values in different - implementations (even if a seed is specified). - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
dtype : int
-
The data type for the elements of the output tensor. if not specified, we will use the data type of the input tensor.
-
seed : float
-
(Optional) Seed to the random generator, if not specified we will auto generate one.
-
- -#### Inputs - -
-
input : T1
-
All values in input have to be in the range:[0, 1].
-
- -#### Outputs - -
-
output : T2
-
The returned output tensor only has values 0 or 1, same shape as input tensor.
-
- -#### Type Constraints - -
-
T1 : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input types to float tensors.
-
T2 : tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(bool)
-
Constrain output types to all numeric tensors and bool tensors.
-
- -### **Conv-22** - - The convolution operator consumes an input tensor and a filter, and - computes the output. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
auto_pad : string (default is NOTSET)
-
auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = ceil(input_shape[i] / strides[i])` for each axis `i`. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER.
-
dilations : list of ints
-
dilation value along each spatial axis of the filter. If not present, the dilation defaults is 1 along each spatial axis.
-
group : int (default is 1)
-
number of groups input channels and output channels are divided into.
-
kernel_shape : list of ints
-
The shape of the convolution kernel. If not present, should be inferred from input W.
-
pads : list of ints
-
Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis.
-
strides : list of ints
-
Stride along each spatial axis. If not present, the stride defaults is 1 along each spatial axis.
-
- -#### Inputs (2 - 3) - -
-
X (differentiable) : T
-
Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 ... x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...].
-
W (differentiable) : T
-
The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x ... x kn), where (k1 x k2 x ... kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL ...]. Assuming zero based indices for the shape array, X.shape[1] == (W.shape[1] * group) == C and W.shape[0] mod G == 0. Or in other words FILTER_IN_CHANNEL multiplied by the number of groups should be equal to DATA_CHANNEL and the number of feature maps M should be a multiple of the number of groups G.
-
B (optional, differentiable) : T
-
Optional 1D bias to be added to the convolution, has size of M.
-
- -#### Outputs - -
-
Y (differentiable) : T
-
Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **ConvTranspose-22** - - The convolution transpose operator consumes an input tensor and a filter, - and computes the output. - - If the pads parameter is provided the shape of the output is calculated via the following equation: - - output_shape[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - pads[start_i] - pads[end_i] - - output_shape can also be explicitly specified in which case pads values are auto generated using these equations: - - total_padding[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - output_shape[i] - If (auto_pads == SAME_UPPER): pads[start_i] = total_padding[i]/2; pads[end_i] = total_padding[i] - (total_padding[i]/2) - Else: pads[start_i] = total_padding[i] - (total_padding[i]/2); pads[end_i] = (total_padding[i]/2). - - - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
auto_pad : string (default is NOTSET)
-
auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = input_shape[i] * strides[i]` for each axis `i`. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER.
-
dilations : list of ints
-
dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each spatial axis.
-
group : int (default is 1)
-
number of groups input channels and output channels are divided into.
-
kernel_shape : list of ints
-
The shape of the convolution kernel. If not present, should be inferred from input W.
-
output_padding : list of ints
-
Additional elements added to the side with higher coordinate indices in the output. Each padding value in "output_padding" must be less than the corresponding stride/dilation dimension. By default, this attribute is a zero vector. Note that this attribute doesn't directly affect the computed output values. It only controls the selection of the computed values, so changing this attribute only adds or removes output elements. If "output_shape" is explicitly provided, "output_padding" does not contribute additional size to "output_shape" but participates in the computation of the needed padding amount. This is also called adjs or adjustment in some frameworks.
-
output_shape : list of ints
-
The shape of the output can be explicitly set which will cause pads values to be auto generated. If output_shape is specified pads values are ignored. See doc for details for equations to generate pads. Note that the output_shape attribute value should not include dimensions for batch size and channels, which are automatically inferred.
-
pads : list of ints
-
Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represents the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follows: [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis.
-
strides : list of ints
-
Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis.
-
- -#### Inputs (2 - 3) - -
-
X (differentiable) : T
-
Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 ... x Dn)
-
W (differentiable) : T
-
The weight tensor that will be used in the convolutions; has size (C x M/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the weight shape will be (C x M/group x k1 x k2 x ... x kn), where (k1 x k2 x ... x kn) is the dimension of the kernel. The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)
-
B (optional, differentiable) : T
-
Optional 1D bias to be added to the convolution, has size of M.
-
- -#### Outputs - -
-
Y (differentiable) : T
-
Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, pad lengths and group count. The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **Cos-22** - - Calculates the cosine of the given input tensor, element-wise. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Inputs - -
-
input (differentiable) : T
-
Input tensor
-
- -#### Outputs - -
-
output (differentiable) : T
-
The cosine of the input tensor computed element-wise
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **Cosh-22** - - Calculates the hyperbolic cosine of the given input tensor element-wise. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Inputs - -
-
input (differentiable) : T
-
Input tensor
-
- -#### Outputs - -
-
output (differentiable) : T
-
The hyperbolic cosine values of the input tensor computed element-wise
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **DeformConv-22** - - Performs deformable convolution as described in https://arxiv.org/abs/1703.06211 and https://arxiv.org/abs/1811.11168. - This operator specification supports the general N-D case. Note that most common use cases have 2D or 3D data. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
dilations : list of ints
-
Dilation value along each spatial axis of the kernel. Default is 1 along each axis.
-
group : int (default is 1)
-
Number of groups the input and output channels, C and oC, are divided into. C and oC must both be divisible by group. Default is 1.
-
kernel_shape : list of ints
-
Shape of the convolution kernel. If not present, it is inferred from the shape of input W.
-
offset_group : int (default is 1)
-
Number of groups of offset. C must be divisible by offset_group. Default is 1.
-
pads : list of ints
-
Padding for the beginning and end along each spatial axis. The values represent the number of pixels added to the beginning and end of the corresponding axis and can take any nonnegative value. The format should be as follows: [x1_begin, x2_begin, ..., x1_end, x2_end, ...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end is the number of pixels added at the end of axis `i`. Default is 0 along each axis.
-
strides : list of ints
-
Stride along each spatial axis. Default is 1 along each axis.
-
- -#### Inputs (3 - 5) - -
-
X : T
-
Input data tensor. For 2D image data, it has shape (N, C, H, W) where N is the batch size, C is the number of input channels, and H and W are the height and width. In general, the shape is (N, C, D1, D2, ... , Dn) for n-dimensional data, where D1 to Dn are the spatial dimension sizes. Most common use cases have n = 2 or 3.
-
W : T
-
Weight tensor that will be used in the convolutions. It has shape (oC, C/group, kH, kW), where oC is the number of output channels and kH and kW are the kernel height and width. For more than 2 dimensions, it has shape (oC, C/group, k1, k2, ... , kn).
-
offset : T
-
Offset tensor denoting the offset for the sampling locations in the convolution kernel. It has shape (N, offset_group * kH * kW * 2, oH, oW) for 2D data or (N, offset_group * k1 * k2 * ... * kn * n, o1, o2, ... , on) for nD data. Use linear interpolation for fractional offset values. Sampling locations outside of the padded input tensor give zero.
-
B (optional) : T
-
Optional 1D bias of length oC to be added to the convolution. Default is a tensor of zeros.
-
mask (optional) : T
-
The mask tensor to be applied to each position in the convolution kernel. It has shape (N, offset_group * kH * kW, oH, oW) for 2D data or (N, offset_group * k1 * k2 * ... * kn * n, o1, o2, ... , on) for nD data. Default is a tensor of ones.
-
- -#### Outputs - -
-
Y : T
-
Output data tensor that contains the result of convolution. It has shape (N, oC, oH, oW) for 2D data or (N, oC, o1, o2, ..., on) for nD data
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **Det-22** - - Det calculates determinant of a square matrix or batches of square matrices. - Det takes one input tensor of shape `[*, M, M]`, where `*` is zero or more batch dimensions, - and the inner-most 2 dimensions form square matrices. - The output is a tensor of shape `[*]`, containing the determinants of all input submatrices. - e.g., When the input is 2-D, the output is a scalar(shape is empty: `[]`). - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Inputs - -
-
X (differentiable) : T
-
Input tensor
-
- -#### Outputs - -
-
Y (differentiable) : T
-
Output tensor
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to floating-point tensors.
-
- -### **Dropout-22** - - Dropout takes an input floating-point tensor, an optional input ratio (floating-point scalar) and an optional input training_mode (boolean scalar). It produces two tensor outputs, - output (floating-point tensor) and mask (optional `Tensor`). If `training_mode` is true then the output Y will be a random dropout; - Note that this Dropout scales the masked input data by the following equation, so to convert the trained model into inference mode, - the user can simply not pass `training_mode` input or set it to false. - ``` - output = scale * data * mask, - ``` - where - ``` - scale = 1. / (1. - ratio). - ``` - This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
seed : int
-
(Optional) Seed to the random generator, if not specified we will auto generate one.
-
- -#### Inputs (1 - 3) - -
-
data (differentiable) : T
-
The input data as Tensor.
-
ratio (optional, non-differentiable) : T1
-
The ratio of random dropout, with value in [0, 1). If this input was not set, or if it was set to 0, the output would be a simple copy of the input. If it's non-zero, output will be a random dropout of the scaled input, which is typically the case during training. It is an optional value, if not specified it will default to 0.5.
-
training_mode (optional, non-differentiable) : T2
-
If set to true then it indicates dropout is being used for training. It is an optional value hence unless specified explicitly, it is false. If it is false, ratio is ignored and the operation mimics inference mode where nothing will be dropped from the input data and if mask is requested as output it will contain all ones.
-
- -#### Outputs (1 - 2) - -
-
output (differentiable) : T
-
The output.
-
mask (optional, non-differentiable) : T2
-
The output mask.
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz)
-
Constrain input and output types to float tensors.
-
T1 : tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz)
-
Constrain input 'ratio' types to float tensors.
-
T2 : tensor(bool)
-
Constrain output 'mask' types to boolean tensors.
-
- -### **Elu-22** - - Elu takes one input data (Tensor) and produces one output data - (Tensor) where the function `f(x) = alpha * (exp(x) - 1.) for x < - 0`, `f(x) = x for x >= 0`., is applied to the tensor elementwise. - - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
alpha : float (default is 1.0)
-
Coefficient of ELU.
-
- -#### Inputs - -
-
X (differentiable) : T
-
1D input tensor
-
- -#### Outputs - -
-
Y (differentiable) : T
-
1D output tensor
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **EyeLike-22** - - Generate a 2D tensor (matrix) with ones on the diagonal and zeros everywhere else. Only 2D - tensors are supported, i.e. input T1 must be of rank 2. The shape of the output tensor is the - same as the input tensor. The data type can be specified by the 'dtype' argument. If - 'dtype' is not specified, then the type of input tensor is used. By default, the main diagonal - is populated with ones, but attribute 'k' can be used to populate upper or lower diagonals. - The 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the - TensorProto message and be valid as an output type. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
dtype : int
-
(Optional) The data type for the elements of the output tensor. If not specified, the data type of the input tensor T1 is used. If input tensor T1 is also not specified, then type defaults to 'float'.
-
k : int (default is 0)
-
(Optional) Index of the diagonal to be populated with ones. Default is 0. If T2 is the output, this op sets T2[i, i+k] = 1. k = 0 populates the main diagonal, k > 0 populates an upper diagonal, and k < 0 populates a lower diagonal.
-
- -#### Inputs - -
-
input : T1
-
2D input tensor to copy shape, and optionally, type information from.
-
- -#### Outputs - -
-
output : T2
-
Output tensor, same shape as input tensor T1.
-
- -#### Type Constraints - -
-
T1 : tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(bool)
-
Constrain input types. Strings and complex are not supported.
-
T2 : tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(bool)
-
Constrain output types. Strings and complex are not supported.
-
- -### **GRU-22** - - Computes an one-layer GRU. This operator is usually supported via some custom - implementation such as CuDNN. - - Notations: - - * `X` - input tensor - * `z` - update gate - * `r` - reset gate - * `h` - hidden gate - * `t` - time step (t-1 means previous time step) - * `W[zrh]` - W parameter weight matrix for update, reset, and hidden gates - * `R[zrh]` - R recurrence weight matrix for update, reset, and hidden gates - * `Wb[zrh]` - W bias vectors for update, reset, and hidden gates - * `Rb[zrh]` - R bias vectors for update, reset, and hidden gates - * `WB[zrh]` - W parameter weight matrix for backward update, reset, and hidden gates - * `RB[zrh]` - R recurrence weight matrix for backward update, reset, and hidden gates - * `WBb[zrh]` - W bias vectors for backward update, reset, and hidden gates - * `RBb[zrh]` - R bias vectors for backward update, reset, and hidden gates - * `H` - Hidden state - * `num_directions` - 2 if direction == bidirectional else 1 - - Activation functions: - - * Relu(x) - max(0, x) - * Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) - * Sigmoid(x) - 1/(1 + e^{-x}) - - NOTE: - Below are optional - - * Affine(x) - alpha * x + beta - * LeakyRelu(x) - x if x >= 0 else alpha * x - * ThresholdedRelu(x) - x if x >= alpha else 0 - * ScaledTanh(x) - alpha * Tanh(beta * x) - * HardSigmoid(x) - min(max(alpha * x + beta, 0), 1) - * Elu(x) - x if x >= 0 else alpha * (e^x - 1) - * Softsign(x) - x/(1 + |x|) - * Softplus(x) - log(1 + e^x) - - Equations (Default: f=Sigmoid, g=Tanh): - - * zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz) - * rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr) - * ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset = 0 - * ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0 - * Ht = (1 - zt) (.) ht + zt (.) Ht-1 - This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. 
An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
activation_alpha : list of floats
-
Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as for the corresponding ONNX operators. For example, with LeakyRelu, the default alpha is 0.01.
-
activation_beta : list of floats
-
Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as for the corresponding ONNX operators.
-
activations : list of strings
-
A list of 2 (or 4 if bidirectional) activation functions for update, reset, and hidden gates. The activation functions must be one of the activation functions specified above. Optional: See the equations for default if not specified.
-
clip : float
-
Cell clip threshold. Clipping bounds the elements of a tensor in the range of [-threshold, +threshold] and is applied to the input of activations. No clip if not specified.
-
direction : string (default is forward)
-
Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional.
-
hidden_size : int
-
Number of neurons in the hidden layer
-
layout : int (default is 0)
-
The shape format of inputs X, initial_h and outputs Y, Y_h. If 0, the following shapes are expected: X.shape = [seq_length, batch_size, input_size], Y.shape = [seq_length, num_directions, batch_size, hidden_size], initial_h.shape = Y_h.shape = [num_directions, batch_size, hidden_size]. If 1, the following shapes are expected: X.shape = [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, num_directions, hidden_size], initial_h.shape = Y_h.shape = [batch_size, num_directions, hidden_size].
-
linear_before_reset : int (default is 0)
-
When computing the output of the hidden gate, apply the linear transformation before multiplying by the output of the reset gate.
-
- -#### Inputs (3 - 6) - -
-
X (differentiable) : T
-
The input sequences packed (and potentially padded) into one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`.
-
W (differentiable) : T
-
The weight tensor for the gates. Concatenation of `W[zrh]` and `WB[zrh]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 3*hidden_size, input_size]`.
-
R (differentiable) : T
-
The recurrence weight tensor. Concatenation of `R[zrh]` and `RB[zrh]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 3*hidden_size, hidden_size]`.
-
B (optional, differentiable) : T
-
The bias tensor for the gates. Concatenation of `[Wb[zrh], Rb[zrh]]` and `[WBb[zrh], RBb[zrh]]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 6*hidden_size]`. Optional: If not specified - assumed to be 0
-
sequence_lens (optional, non-differentiable) : T1
-
Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length `seq_length`. It has shape `[batch_size]`.
-
initial_h (optional, non-differentiable) : T
-
Optional initial value of the hidden. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`.
-
- -#### Outputs (0 - 2) - -
-
Y (optional, differentiable) : T
-
A tensor that concats all the intermediate output values of the hidden. It has shape `[seq_length, num_directions, batch_size, hidden_size]`.
-
Y_h (optional, differentiable) : T
-
The last output value of the hidden. It has shape `[num_directions, batch_size, hidden_size]`.
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
T1 : tensor(int32)
-
Constrain seq_lens to integer tensor.
-
- -### **GlobalAveragePool-22** - - GlobalAveragePool consumes an input tensor X and applies average pooling across - the values in the same channel. This is equivalent to AveragePool with kernel size - equal to the spatial dimension of input tensor. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Inputs - -
-
X (differentiable) : T
-
Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size.
-
- -#### Outputs - -
-
Y (differentiable) : T
-
Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1.
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **GlobalLpPool-22** - - GlobalLpPool consumes an input tensor X and applies lp pool pooling across - the values in the same channel. This is equivalent to LpPool with kernel size - equal to the spatial dimension of input tensor. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
p : int (default is 2)
-
p value of the Lp norm used to pool over the input data.
-
- -#### Inputs - -
-
X (differentiable) : T
-
Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size.
-
- -#### Outputs - -
-
Y (differentiable) : T
-
Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1.
-
- -#### Type Constraints - -
-
T : tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **GlobalMaxPool-22** - - GlobalMaxPool consumes an input tensor X and applies max pooling across - the values in the same channel. This is equivalent to MaxPool with kernel size - equal to the spatial dimension of input tensor. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Inputs - -
-
X (differentiable) : T
-
Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size.
-
- -#### Outputs - -
-
Y (differentiable) : T
-
Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1.
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **GridSample-22** - - Given an input `X` and a flow-field `grid`, computes the output `Y` using `X` values and pixel locations from the `grid`. - For spatial input `X` with shape (N, C, H, W), the `grid` will have shape (N, H_out, W_out, 2), - the output `Y` will have shape (N, C, H_out, W_out). For volumetric input `X` with shape (N, C, D, H, W), - the `grid` will have shape (N, D_out, H_out, W_out, 3), the output `Y` will have shape (N, C, D_out, H_out, W_out). - More generally, for an input `X` of rank r+2 with shape (N, C, d1, d2, ..., dr), - the `grid` will have shape (N, D1_out, D2_out, ..., Dr_out, r), the output `Y` will have shape (N, C, D1_out, D2_out, ..., Dr_out). - - The tensor `X` contains values at centers of square pixels (voxels, etc) locations such as (n, c, d1_in, d2_in, ..., dr_in). - The (n, d1_out, d2_out, ..., dr_out, :) values from the tensor `grid` are the normalized positions for interpolating the values - at the (n, c, d1_out, d2_out, ..., dr_out) locations from the output tensor `Y` using a specified interpolation method (the mode) - and a padding mode (for `grid` positions falling outside the 2-dimensional image). - - For example, the values in `grid[n, h_out, w_out, :]` are size-2 vectors specifying normalized positions in the 2-dimensional space of `X`. - They are used to interpolate output values of `Y[n, c, h_out, w_out]`. - - The GridSample operator is often used in doing grid generator and sampler in the - [Spatial Transformer Networks](https://arxiv.org/abs/1506.02025). - See also in [torch.nn.functional.grid_sample](https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html). - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
align_corners : int (default is 0)
-
If align_corners=1, the extrema (-1 and 1) are considered as referring to the center points of the input's corner pixels (voxels, etc.). If align_corners=0, they are instead considered as referring to the corner points of the input's corner pixels (voxels, etc.), making the sampling more resolution agnostic.
-
mode : string (default is linear)
-
Three interpolation modes: linear (default), nearest and cubic. The "linear" mode includes linear and N-linear interpolation modes depending on the number of spatial dimensions of the input tensor (i.e. linear for 1 spatial dimension, bilinear for 2 spatial dimensions, etc.). The "cubic" mode also includes N-cubic interpolation modes following the same rules. The "nearest" mode rounds to the nearest even index when the sampling point falls halfway between two indices.
-
padding_mode : string (default is zeros)
-
Support padding modes for outside grid values: `zeros`(default), `border`, `reflection`. zeros: use 0 for out-of-bound grid locations, border: use border values for out-of-bound grid locations, reflection: use values at locations reflected by the border for out-of-bound grid locations. If index 0 represents the margin pixel, the reflected value at index -1 will be the same as the value at index 1. For location far away from the border, it will keep being reflected until becoming in bound. If pixel location x = -3.5 reflects by border -1 and becomes x' = 1.5, then reflects by border 1 and becomes x'' = 0.5.
-
- -#### Inputs - -
-
X (differentiable) : T1
-
Input tensor of rank r+2 that has shape (N, C, D1, D2, ..., Dr), where N is the batch size, C is the number of channels, D1, D2, ..., Dr are the spatial dimensions.
-
grid (non-differentiable) : T2
-
Input offset of shape (N, D1_out, D2_out, ..., Dr_out, r), where D1_out, D2_out, ..., Dr_out are the spatial dimensions of the grid and output, and r is the number of spatial dimensions. Grid specifies the sampling locations normalized by the input spatial dimensions. Therefore, it should have most values in the range of [-1, 1]. If the grid has values outside the range of [-1, 1], the corresponding outputs will be handled as defined by padding_mode. Following computer vision convention, the coordinates in the length-r location vector are listed from the innermost tensor dimension to the outermost, the opposite of regular tensor indexing.
-
- -#### Outputs - -
-
Y (differentiable) : T1
-
Output tensor of rank r+2 that has shape (N, C, D1_out, D2_out, ..., Dr_out) of the sampled values. For integer input types, intermediate values are computed as floating point and cast to integer at the end.
-
- -#### Type Constraints - -
-
T1 : tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128)
-
Constrain input `X` and output `Y` types to all tensor types.
-
T2 : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain grid types to float tensors.
-
- -### **HardSigmoid-22** - - HardSigmoid takes one input data (Tensor) and produces one output data - (Tensor) where the HardSigmoid function, y = max(0, min(1, alpha * x + beta)), - is applied to the tensor elementwise. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
alpha : float (default is 0.2)
-
Value of alpha.
-
beta : float (default is 0.5)
-
Value of beta.
-
- -#### Inputs - -
-
X (differentiable) : T
-
Input tensor
-
- -#### Outputs - -
-
Y (differentiable) : T
-
Output tensor
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **HardSwish-22** - - HardSwish takes one input data (Tensor) and produces one output data (Tensor) where - the HardSwish function, y = x * max(0, min(1, alpha * x + beta)) = x * HardSigmoid(x), - where alpha = 1/6 and beta = 0.5, is applied to the tensor elementwise. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Inputs - -
-
X (differentiable) : T
-
Input tensor
-
- -#### Outputs - -
-
Y (differentiable) : T
-
Output tensor
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **InstanceNormalization-22** - - Carries out instance normalization as described in the paper - https://arxiv.org/abs/1607.08022. - - y = scale * (x - mean) / sqrt(variance + epsilon) + B, - where mean and variance are computed per instance per channel. - - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
epsilon : float (default is 1e-05)
-
The epsilon value to use to avoid division by zero.
-
- -#### Inputs - -
-
input (differentiable) : T
-
Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size.
-
scale (differentiable) : T
-
The input 1-dimensional scale tensor of size C.
-
B (differentiable) : T
-
The input 1-dimensional bias tensor of size C.
-
- -#### Outputs - -
-
output (differentiable) : T
-
The output tensor of the same shape as input.
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **LSTM-22** - - Computes an one-layer LSTM. This operator is usually supported via some - custom implementation such as CuDNN. - - Notations: - - * `X` - input tensor - * `i` - input gate - * `o` - output gate - * `f` - forget gate - * `c` - cell gate - * `t` - time step (t-1 means previous time step) - * `W[iofc]` - W parameter weight matrix for input, output, forget, and cell gates - * `R[iofc]` - R recurrence weight matrix for input, output, forget, and cell gates - * `Wb[iofc]` - W bias vectors for input, output, forget, and cell gates - * `Rb[iofc]` - R bias vectors for input, output, forget, and cell gates - * `P[iof]` - P peephole weight vector for input, output, and forget gates - * `WB[iofc]` - W parameter weight matrix for backward input, output, forget, and cell gates - * `RB[iofc]` - R recurrence weight matrix for backward input, output, forget, and cell gates - * `WBb[iofc]` - W bias vectors for backward input, output, forget, and cell gates - * `RBb[iofc]` - R bias vectors for backward input, output, forget, and cell gates - * `PB[iof]` - P peephole weight vector for backward input, output, and forget gates - * `H` - Hidden state - * `num_directions` - 2 if direction == bidirectional else 1 - - Activation functions: - - * Relu(x) - max(0, x) - * Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) - * Sigmoid(x) - 1/(1 + e^{-x}) - - NOTE: Below are optional - - * Affine(x) - alpha*x + beta - * LeakyRelu(x) - x if x >= 0 else alpha * x - * ThresholdedRelu(x) - x if x >= alpha else 0 - * ScaledTanh(x) - alpha*Tanh(beta*x) - * HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) - * Elu(x) - x if x >= 0 else alpha*(e^x - 1) - * Softsign(x) - x/(1 + |x|) - * Softplus(x) - log(1 + e^x) - - Equations (Default: f=Sigmoid, g=Tanh, h=Tanh): - - * it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi) - * ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf) - * ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) - * Ct = ft (.) Ct-1 + it (.) 
ct - * ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo) - * Ht = ot (.) h(Ct) - This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
activation_alpha : list of floats
-
Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as of corresponding ONNX operators. For example with LeakyRelu, the default alpha is 0.01.
-
activation_beta : list of floats
-
Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as of corresponding ONNX operators.
-
activations : list of strings
-
A list of 3 (or 6 if bidirectional) activation functions for input, output, forget, cell, and hidden. The activation functions must be one of the activation functions specified above. Optional: See the equations for default if not specified.
-
clip : float
-
Cell clip threshold. Clipping bounds the elements of a tensor in the range of [-threshold, +threshold] and is applied to the input of activations. No clip if not specified.
-
direction : string (default is forward)
-
Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional.
-
hidden_size : int
-
Number of neurons in the hidden layer
-
input_forget : int (default is 0)
-
Couple the input and forget gates if 1.
-
layout : int (default is 0)
-
The shape format of inputs X, initial_h, initial_c and outputs Y, Y_h, Y_c. If 0, the following shapes are expected: X.shape = [seq_length, batch_size, input_size], Y.shape = [seq_length, num_directions, batch_size, hidden_size], initial_h.shape = Y_h.shape = initial_c.shape = Y_c.shape = [num_directions, batch_size, hidden_size]. If 1, the following shapes are expected: X.shape = [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, num_directions, hidden_size], initial_h.shape = Y_h.shape = initial_c.shape = Y_c.shape = [batch_size, num_directions, hidden_size].
-
- -#### Inputs (3 - 8) - -
-
X (differentiable) : T
-
The input sequences packed (and potentially padded) into one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`.
-
W (differentiable) : T
-
The weight tensor for the gates. Concatenation of `W[iofc]` and `WB[iofc]` (if bidirectional) along dimension 0. The tensor has shape `[num_directions, 4*hidden_size, input_size]`.
-
R (differentiable) : T
-
The recurrence weight tensor. Concatenation of `R[iofc]` and `RB[iofc]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 4*hidden_size, hidden_size]`.
-
B (optional, differentiable) : T
-
The bias tensor for input gate. Concatenation of `[Wb[iofc], Rb[iofc]]`, and `[WBb[iofc], RBb[iofc]]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 8*hidden_size]`. Optional: If not specified - assumed to be 0.
-
sequence_lens (optional, non-differentiable) : T1
-
Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length `seq_length`. It has shape `[batch_size]`.
-
initial_h (optional, non-differentiable) : T
-
Optional initial value of the hidden. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`.
-
initial_c (optional, non-differentiable) : T
-
Optional initial value of the cell. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`.
-
P (optional, differentiable) : T
-
The weight tensor for peepholes. Concatenation of `P[iof]` and `PB[iof]` (if bidirectional) along dimension 0. It has shape `[num_directions, 3*hidden_size]`. Optional: If not specified - assumed to be 0.
-
- -#### Outputs (0 - 3) - -
-
Y (optional, differentiable) : T
-
A tensor that concats all the intermediate output values of the hidden. It has shape `[seq_length, num_directions, batch_size, hidden_size]`.
-
Y_h (optional, differentiable) : T
-
The last output value of the hidden. It has shape `[num_directions, batch_size, hidden_size]`.
-
Y_c (optional, differentiable) : T
-
The last output value of the cell. It has shape `[num_directions, batch_size, hidden_size]`.
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
T1 : tensor(int32)
-
Constrain seq_lens to integer tensor.
-
- -### **LpNormalization-22** - - Given a matrix, apply Lp-normalization along the provided axis. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
axis : int (default is -1)
-
The axis on which to apply normalization, -1 means last axis.
-
p : int (default is 2)
-
The order of the normalization, only 1 or 2 are supported.
-
- -#### Inputs - -
-
input (differentiable) : T
-
Input matrix
-
- -#### Outputs - -
-
output (differentiable) : T
-
Matrix after normalization
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **LpPool-22** - - LpPool consumes an input tensor X and applies Lp pooling across - the tensor according to kernel sizes, stride sizes, and pad lengths. - Lp pooling consisting of computing the Lp norm on all values of a subset - of the input tensor according to the kernel size and downsampling the - data into the output tensor Y for further processing. The output spatial shape will be following: - ``` - output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - {kernelSpatialShape}) / strides_spatial_shape[i] + 1) - ``` - or - ``` - output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - {kernelSpatialShape}) / strides_spatial_shape[i] + 1) - ``` - if ceil_mode is enabled `pad_shape[i]` is the sum of pads along axis `i`. - - `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following: - ``` - VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - {kernelSpatialShape} + 1) / strides_spatial_shape[i]) - SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i]) - ``` - And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`: - ``` - pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + {kernelSpatialShape} - input_spatial_shape[i] - ``` - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
auto_pad : string (default is NOTSET)
-
auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = ceil(input_shape[i] / strides[i])` for each axis `i`. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER.
-
ceil_mode : int (default is 0)
-
Whether to use ceil or floor (default) to compute the output shape.
-
dilations : list of ints
-
dilation value along each spatial axis of the filter. If not present, the dilation defaults is 1 along each spatial axis.
-
kernel_shape : list of ints (required)
-
The size of the kernel along each axis.
-
p : int (default is 2)
-
p value of the Lp norm used to pool over the input data.
-
pads : list of ints
-
Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis.
-
strides : list of ints
-
Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis.
-
- -#### Inputs - -
-
X (differentiable) : T
-
Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size.
-
- -#### Outputs - -
-
Y (differentiable) : T
-
Output data tensor from Lp pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes.
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **MaxPool-22** - - MaxPool consumes an input tensor X and applies max pooling across - the tensor according to kernel sizes, stride sizes, and pad lengths. - max pooling consisting of computing the max on all values of a - subset of the input tensor according to the kernel size and downsampling the - data into the output tensor Y for further processing. The output spatial shape is calculated differently - depending on whether explicit padding is used, where pads is employed, or auto padding is used, where auto_pad is utilized. - With explicit padding (https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html?highlight=maxpool#torch.nn.MaxPool2d): - ``` - output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1) - ``` - or - ``` - output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1) - ``` - if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`. Sliding windows that would start in the right padded region are ignored. - - `auto_pad` is a DEPRECATED attribute. 
If you are using them currently, the output spatial shape will be following when ceil_mode is enabled: - ``` - VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i]) - SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i]) - ``` - or when ceil_mode is disabled (https://www.tensorflow.org/api_docs/python/tf/keras/layers/AveragePooling2D): - ``` - VALID: output_spatial_shape[i] = floor((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i]) + 1 - SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = floor((input_spatial_shape[i] - 1) / strides_spatial_shape[i]) + 1 - ``` - And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`: - ``` - pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i] - ``` - The output of each pooling window is maximum number of elements exclude pad. - - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
auto_pad : string (default is NOTSET)
-
auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = ceil(input_shape[i] / strides[i])` for each axis `i`. The padding is split between the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER.
-
ceil_mode : int (default is 0)
-
Whether to use ceil or floor (default) to compute the output shape.
-
dilations : list of ints
-
Dilation value along each spatial axis of filter. If not present, the dilation defaults to 1 along each spatial axis.
-
kernel_shape : list of ints (required)
-
The size of the kernel along each axis.
-
pads : list of ints
-
Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis.
-
storage_order : int (default is 0)
-
The storage order of the tensor. 0 is row major, and 1 is column major. This attribute is used only to convert an n-tuple index value into a single integer value for producing the second output.
-
strides : list of ints
-
Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis.
-
- -#### Inputs - -
-
X (differentiable) : T
-
Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...].
-
- -#### Outputs (1 - 2) - -
-
Y (differentiable) : T
-
Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used.
-
Indices (optional, non-differentiable) : I
-
Indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor. The values in indices are the indices of the selected values during pooling. The indices are computed as flatten 1-D tensor, and the indices do not consider padding. So the values in indices are in [0, N x C x D1 x ... x Dn).
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(uint8)
-
Constrain input and output types to float and 8 bit tensors.
-
I : tensor(int64)
-
Constrain index tensor to int64
-
- -### **MaxRoiPool-22** - - ROI max pool consumes an input tensor X and region of interests (RoIs) to - apply max pooling across each RoI, to produce output 4-D tensor of shape - (num_rois, channels, pooled_shape[0], pooled_shape[1]). - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
pooled_shape : list of ints (required)
-
ROI pool output shape (height, width).
-
spatial_scale : float (default is 1.0)
-
Multiplicative spatial scale factor to translate ROI coordinates from their input scale to the scale used when pooling.
-
- -#### Inputs - -
-
X (differentiable) : T
-
Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data.
-
rois (non-differentiable) : T
-
RoIs (Regions of Interest) to pool over. Should be a 2-D tensor of shape (num_rois, 5) given as [[batch_id, x1, y1, x2, y2], ...].
-
- -#### Outputs - -
-
Y (differentiable) : T
-
RoI pooled output 4-D tensor of shape (num_rois, channels, pooled_shape[0], pooled_shape[1]).
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **MaxUnpool-22** - - MaxUnpool essentially computes the partial inverse of the MaxPool op. - The input information to this op is typically the output information from a MaxPool op. The first - input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output) - from MaxPool. The second input tensor, I, contains the indices to the (locally maximal) elements corresponding - to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op. - The third (optional) input is a tensor that specifies the output size of the unpooling operation. - - MaxUnpool is intended to do 'partial' inverse of the MaxPool op. 'Partial' because all the non-maximal - values from the original input to MaxPool are set to zero in the output of the MaxUnpool op. Pooling - the result of an unpooling operation should give back the original input to the unpooling op. - - MaxUnpool can produce the same output size for several input sizes, which makes unpooling op ambiguous. - The third input argument, output_size, is meant to disambiguate the op and produce output tensor of - known/predictable size. - - In addition to the inputs, MaxUnpool takes three attributes, namely kernel_shape, strides, and pads, - which define the exact unpooling op. The attributes typically have the same values as the corresponding - pooling op that the unpooling op is trying to invert. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
kernel_shape : list of ints (required)
-
The size of the kernel along each axis.
-
pads : list of ints
-
Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis.
-
strides : list of ints
-
Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis.
-
- -#### Inputs (2 - 3) - -
-
X (differentiable) : T1
-
Input data tensor that has to be unpooled. This tensor is typically the first output of the MaxPool op. Dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non-image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...].
-
I (non-differentiable) : T2
-
Input data tensor containing the indices corresponding to elements in the first input tensor X. This tensor is typically the second output of the MaxPool op. Dimensions must be the same as input tensor X. The indices are linear, i.e. computed considering the tensor as flattened 1-D tensor, assuming row-major storage. Also, the linear indices should not consider padding. So the values in indices are in the range [0, N x C x D1 x ... x Dn).
-
output_shape (optional, non-differentiable) : T2
-
The shape of the output can be explicitly set which will cause pads values to be auto generated. If 'output_shape' is specified, 'pads' values are ignored.
-
- -#### Outputs - -
-
output (differentiable) : T1
-
Output data tensor that contains the result of the unpooling.
-
- -#### Type Constraints - -
-
T1 : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
T2 : tensor(int64)
-
Constrain index tensor to int64
-
- -### **Mish-22** - - Mish: A Self Regularized Non-Monotonic Neural Activation Function. - - Perform the linear unit element-wise on the input tensor X using formula: - - ``` - mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^{x})) - ``` - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Inputs - -
-
X (differentiable) : T
-
Input tensor
-
- -#### Outputs - -
-
Y (differentiable) : T
-
Output tensor
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input X and output types to float tensors.
-
- -### **Multinomial-22** - - Generate a tensor of samples from a multinomial distribution according to the probabilities - of each of the possible outcomes. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
dtype : int (default is 6)
-
(Optional) The data type for the elements of the output tensor, if not specified, we will use int32.
-
sample_size : int (default is 1)
-
Number of times to sample.
-
seed : float
-
(Optional) Seed to the random generator, if not specified we will auto generate one.
-
- -#### Inputs - -
-
input : T1
-
Input tensor with shape [batch_size, class_size], where class_size is the number of all possible outcomes. Each value along the axis zero represents the unnormalized log-probability of each corresponding outcome in a batch.
-
- -#### Outputs - -
-
output : T2
-
Output tensor with shape [batch_size, sample_size], where sample_size is the number of times to sample. Each value along the axis zero represents the outcome of the corresponding sample in a batch.
-
- -#### Type Constraints - -
-
T1 : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input types to float tensors.
-
T2 : tensor(int32), tensor(int64)
-
Constrain output types to integral tensors.
-
- -### **NegativeLogLikelihoodLoss-22** - - A NegativeLogLikelihoodLoss operator computes (weighted) negative log likelihood loss. - Its "input" tensor has the shape of (N, C, d1, d2, ..., dk) where k >= 0. - The "input" tensor contains log-probabilities for input[n, :, d_1, d_2,..., d_k] being in a class of [0, C). - The operator's "target" input tensor has the shape of (N, d1, d2, ..., dk). It encodes class labels (one of C classes) - or it may contain a special value (indicated by an attribute ignore_index) for N x d1 x d2 x ... x dk samples. - The loss value for input[n, :, d_1, d_2,...d_k] being classified as class c = target[n][d_1][d_2]...[d_k] is computed as: - - ``` - loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k]. - ``` - - When an optional "weight" is provided, the sample loss is calculated as: - - ``` - loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k] * weight[c]. - ``` - - loss is zero for the case when target-value equals ignore_index. - - ``` - loss[n][d_1][d_2]...[d_k] = 0, when target[n][d_1][d_2]...[d_k] = ignore_index - ``` - - If "reduction" attribute is set to "none", the operator's output will be the above loss with shape (N, d1, d2, ..., dk). - If "reduction" attribute is set to "mean" (the default attribute value), the output loss is (weight) averaged: - - ``` - mean(loss), if "weight" is not provided, - ``` - - or if weight is provided, - - ``` - sum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]]), for all samples. - ``` - - If "reduction" attribute is set to "sum", the output is a scalar: `sum(loss)`. - - See also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss. 
- - Example 1: - - ``` - // negative log likelihood loss, "none" reduction - N, C, d1 = 2, 3, 2 - input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], - [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] - target = [[2, 1], [0, 2]] - - loss = np.zeros((N, d1)) - for n in range(N): - for d_1 in range(d1): - c = target[n][d_1] - loss[n][d_1] = -input[n][c][d_1] - - // print(loss) - // [[-3. -2.] - // [-0. -2.]] - ``` - - Example 2: - - ``` - // weighted negative log likelihood loss, sum reduction - N, C, d1 = 2, 3, 2 - input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], - [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] - target = [[2, 1], [0, 2]] - weight = [0.2, 0.3, 0.1] - loss = np.zeros((N, d1)) - for n in range(N): - for d_1 in range(d1): - c = target[n][d_1] - loss[n][d_1] = -input[n][c][d_1] * weight[c] - - loss = np.sum(loss) - // print(loss) - // -1.1 - ``` - - Example 3: - - ``` - // weighted negative log likelihood loss, mean reduction - N, C, d1 = 2, 3, 2 - input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], - [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] - target = [[2, 1], [0, 2]] - weight = [0.2, 0.3, 0.1] - loss = np.zeros((N, d1)) - weight_total = 0 - for n in range(N): - for d_1 in range(d1): - c = target[n][d_1] - loss[n][d_1] = -input[n][c][d_1] * weight[c] - weight_total = weight_total + weight[c] - - loss = np.sum(loss) / weight_total - // print(loss) - // -1.57 - ``` - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
ignore_index : int
-
Specifies a target value that is ignored and does not contribute to the input gradient. It's an optional value.
-
reduction : string (default is mean)
-
Type of reduction to apply to loss: none, sum, mean (default). 'none': the output is the loss for each sample. 'sum': the output will be summed. 'mean': the sum of the output will be divided by the sum of applied weights.
-
- -#### Inputs (2 - 3) - -
-
input (differentiable) : T
-
Input tensor of shape (N, C) or (N, C, d1, d2, ..., dk).
-
target (non-differentiable) : Tind
-
Target tensor of shape (N) or (N, d1, d2, ..., dk). Target element value shall be in range of [0, C). If ignore_index is specified, it may have a value outside [0, C) and the target values should either be in the range [0, C) or have the value ignore_index.
-
weight (optional, non-differentiable) : T
-
Optional rescaling weight tensor. If given, it has to be a tensor of size C. Otherwise, it is treated as if having all ones.
-
- -#### Outputs - -
-
loss (differentiable) : T
-
The negative log likelihood loss
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input, weight, and output types to floating-point tensors.
-
Tind : tensor(int32), tensor(int64)
-
Constrain target to integer types
-
- -### **RNN-22** - - Computes an one-layer simple RNN. This operator is usually supported - via some custom implementation such as CuDNN. - - Notations: - - * `X` - input tensor - * `i` - input gate - * `t` - time step (t-1 means previous time step) - * `Wi` - W parameter weight matrix for input gate - * `Ri` - R recurrence weight matrix for input gate - * `Wbi` - W parameter bias vector for input gate - * `Rbi` - R parameter bias vector for input gate - * `WBi` - W parameter weight matrix for backward input gate - * `RBi` - R recurrence weight matrix for backward input gate - * `WBbi` - WR bias vectors for backward input gate - * `RBbi` - RR bias vectors for backward input gate - * `H` - Hidden state - * `num_directions` - 2 if direction == bidirectional else 1 - - Activation functions: - - * Relu(x) - max(0, x) - * Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) - * Sigmoid(x) - 1/(1 + e^{-x}) - - NOTE: Below are optional - - * Affine(x) - alpha*x + beta - * LeakyRelu(x) - x if x >= 0 else alpha * x - * ThresholdedRelu(x) - x if x >= alpha else 0 - * ScaledTanh(x) - alpha*Tanh(beta*x) - * HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) - * Elu(x) - x if x >= 0 else alpha*(e^x - 1) - * Softsign(x) - x/(1 + |x|) - * Softplus(x) - log(1 + e^x) - - Equations (Default: f=Tanh): - - * Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi) - This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
activation_alpha : list of floats
-
Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as those of the corresponding ONNX operators. For example, with LeakyRelu, the default alpha is 0.01.
-
activation_beta : list of floats
-
Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as those of the corresponding ONNX operators.
-
activations : list of strings (default is ['Tanh', 'Tanh'])
-
One (or two if bidirectional) activation function for input gate. The activation function must be one of the activation functions specified above. Optional: Default `Tanh` if not specified.
-
clip : float
-
Cell clip threshold. Clipping bounds the elements of a tensor in the range of [-threshold, +threshold] and is applied to the input of activations. No clip if not specified.
-
direction : string (default is forward)
-
Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional.
-
hidden_size : int
-
Number of neurons in the hidden layer
-
layout : int (default is 0)
-
The shape format of inputs X, initial_h and outputs Y, Y_h. If 0, the following shapes are expected: X.shape = [seq_length, batch_size, input_size], Y.shape = [seq_length, num_directions, batch_size, hidden_size], initial_h.shape = Y_h.shape = [num_directions, batch_size, hidden_size]. If 1, the following shapes are expected: X.shape = [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, num_directions, hidden_size], initial_h.shape = Y_h.shape = [batch_size, num_directions, hidden_size].
-
- -#### Inputs (3 - 6) - -
-
X (differentiable) : T
-
The input sequences packed (and potentially padded) into one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`.
-
W (differentiable) : T
-
The weight tensor for input gate. Concatenation of `Wi` and `WBi` (if bidirectional). The tensor has shape `[num_directions, hidden_size, input_size]`.
-
R (differentiable) : T
-
The recurrence weight tensor. Concatenation of `Ri` and `RBi` (if bidirectional). The tensor has shape `[num_directions, hidden_size, hidden_size]`.
-
B (optional, differentiable) : T
-
The bias tensor for input gate. Concatenation of `[Wbi, Rbi]` and `[WBbi, RBbi]` (if bidirectional). The tensor has shape `[num_directions, 2*hidden_size]`. Optional: If not specified - assumed to be 0.
-
sequence_lens (optional, non-differentiable) : T1
-
Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length `seq_length`. It has shape `[batch_size]`.
-
initial_h (optional, non-differentiable) : T
-
Optional initial value of the hidden. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`.
-
- -#### Outputs (0 - 2) - -
-
Y (optional, differentiable) : T
-
A tensor that concats all the intermediate output values of the hidden. It has shape `[seq_length, num_directions, batch_size, hidden_size]`.
-
Y_h (optional, differentiable) : T
-
The last output value of the hidden. It has shape `[num_directions, batch_size, hidden_size]`.
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
T1 : tensor(int32)
-
Constrain seq_lens to integer tensor.
-
- -### **RandomNormal-22** - - Generate a tensor with random values drawn from a normal distribution. The shape - of the tensor is specified by the `shape` argument and the parameter of the normal distribution - specified by `mean` and `scale`. - - The data type is specified by the 'dtype' argument. The 'dtype' argument must - be one of the data types specified in the 'DataType' enum field in the - TensorProto message. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
dtype : int (default is 1)
-
The data type for the elements of the output tensor. Default is TensorProto::FLOAT.
-
mean : float (default is 0.0)
-
The mean of the normal distribution.
-
scale : float (default is 1.0)
-
The standard deviation of the normal distribution.
-
seed : float
-
(Optional) Seed to the random generator; if not specified, one will be auto-generated.
-
shape : list of ints (required)
-
The shape of the output tensor.
-
- -#### Inputs - - -#### Outputs - -
-
output : T
-
Output tensor of random values drawn from normal distribution
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain output types to float tensors.
-
- -### **RandomNormalLike-22** - - Generate a tensor with random values drawn from a normal distribution. - The shape of the output tensor is copied from the shape of the input tensor, - and the parameters of the normal distribution are specified by `mean` and `scale`. - - The data type is specified by the 'dtype' argument, or copied from the input tensor if not provided. - The 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the - TensorProto message, and be valid as an output type. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
dtype : int
-
(Optional) The data type for the elements of the output tensor, if not specified, we will use the data type of the input tensor.
-
mean : float (default is 0.0)
-
The mean of the normal distribution.
-
scale : float (default is 1.0)
-
The standard deviation of the normal distribution.
-
seed : float
-
(Optional) Seed to the random generator; if not specified, one will be auto-generated.
-
- -#### Inputs - -
-
input : T1
-
Input tensor to copy shape and optionally type information from.
-
- -#### Outputs - -
-
output : T2
-
Output tensor of random values drawn from normal distribution
-
- -#### Type Constraints - -
-
T1 : tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128)
-
Constrain to any tensor type. If the dtype attribute is not provided this must be a valid output type.
-
T2 : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain output types to float tensors.
-
- -### **RandomUniform-22** - - Generate a tensor with random values drawn from a uniform distribution. The shape - of the tensor is specified by the `shape` argument and the range by `low` and `high`. - - The data type is specified by the 'dtype' argument. The 'dtype' argument must - be one of the data types specified in the 'DataType' enum field in the - TensorProto message. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
dtype : int (default is 1)
-
The data type for the elements of the output tensor. If not specified, default is TensorProto::FLOAT.
-
high : float (default is 1.0)
-
Upper boundary of the output values.
-
low : float (default is 0.0)
-
Lower boundary of the output values.
-
seed : float
-
(Optional) Seed to the random generator; if not specified, one will be auto-generated.
-
shape : list of ints (required)
-
The shape of the output tensor.
-
- -#### Inputs - - -#### Outputs - -
-
output : T
-
Output tensor of random values drawn from uniform distribution
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain output types to float tensors.
-
- -### **RandomUniformLike-22** - - Generate a tensor with random values drawn from a uniform distribution. - The shape of the output tensor is copied from the shape of the input tensor, - and the parameters of the uniform distribution are specified by `low` and `high`. - - The data type is specified by the 'dtype' argument, or copied from the input tensor if not provided. - The 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the - TensorProto message and be valid as an output type. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
dtype : int
-
(Optional) The data type for the elements of the output tensor, if not specified, we will use the data type of the input tensor.
-
high : float (default is 1.0)
-
Upper boundary of the output values.
-
low : float (default is 0.0)
-
Lower boundary of the output values.
-
seed : float
-
(Optional) Seed to the random generator; if not specified, one will be auto-generated.
-
- -#### Inputs - -
-
input : T1
-
Input tensor to copy shape and optionally type information from.
-
- -#### Outputs - -
-
output : T2
-
Output tensor of random values drawn from uniform distribution
-
- -#### Type Constraints - -
-
T1 : tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128)
-
Constrain to any tensor type. If the dtype attribute is not provided this must be a valid output type.
-
T2 : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain output types to float tensors.
-
- -### **RoiAlign-22** - - Region of Interest (RoI) align operation described in the - [Mask R-CNN paper](https://arxiv.org/abs/1703.06870). - RoiAlign consumes an input tensor X and region of interests (rois) - to apply pooling across each RoI; it produces a 4-D tensor of shape - (num_rois, C, output_height, output_width). - - RoiAlign is proposed to avoid the misalignment by removing - quantizations while converting from original image into feature - map and from feature map into RoI feature; in each ROI bin, - the value of the sampled locations are computed directly - through bilinear interpolation. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
coordinate_transformation_mode : string (default is half_pixel)
-
Allowed values are 'half_pixel' and 'output_half_pixel'. Use the value 'half_pixel' to pixel shift the input coordinates by -0.5 (the recommended behavior). Use the value 'output_half_pixel' to omit the pixel shift for the input (use this for a backward-compatible behavior).
-
mode : string (default is avg)
-
The pooling method. Two modes are supported: 'avg' and 'max'. Default is 'avg'.
-
output_height : int (default is 1)
-
default 1; Pooled output Y's height.
-
output_width : int (default is 1)
-
default 1; Pooled output Y's width.
-
sampling_ratio : int (default is 0)
-
Number of sampling points in the interpolation grid used to compute the output value of each pooled output bin. If > 0, then exactly sampling_ratio x sampling_ratio grid points are used. If == 0, then an adaptive number of grid points are used (computed as ceil(roi_width / output_width), and likewise for height). Default is 0.
-
spatial_scale : float (default is 1.0)
-
Multiplicative spatial scale factor to translate ROI coordinates from their input spatial scale to the scale used when pooling, i.e., spatial scale of the input feature map X relative to the input image. E.g.; default is 1.0f.
-
- -#### Inputs - -
-
X : T1
-
Input data tensor from the previous operator; 4-D feature map of shape (N, C, H, W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data.
-
rois : T1
-
RoIs (Regions of Interest) to pool over; rois is 2-D input of shape (num_rois, 4) given as [[x1, y1, x2, y2], ...]. The RoIs' coordinates are in the coordinate system of the input image. Each coordinate set has a 1:1 correspondence with the 'batch_indices' input.
-
batch_indices : T2
-
1-D tensor of shape (num_rois,) with each element denoting the index of the corresponding image in the batch.
-
- -#### Outputs - -
-
Y : T1
-
RoI pooled output, 4-D tensor of shape (num_rois, C, output_height, output_width). The r-th batch element Y[r-1] is a pooled feature map corresponding to the r-th RoI X[r-1].
-
- -#### Type Constraints - -
-
T1 : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain types to float tensors.
-
T2 : tensor(int64)
-
Constrain types to int tensors.
-
- -### **Round-22** - - Round takes one input Tensor and rounds the values, element-wise, meaning - it finds the nearest integer for each value. - In case of halves, the rule is to round them to the nearest even integer. - If input x is integral, +0, -0, NaN, or infinite, x itself is returned. - The output tensor has the same shape and type as the input. - - Examples: - ``` - round([0.9]) = [1.0] - round([2.5]) = [2.0] - round([2.3]) = [2.0] - round([1.5]) = [2.0] - round([-4.5]) = [-4.0] - ``` - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Inputs - -
-
X (non-differentiable) : T
-
Input tensor
-
- -#### Outputs - -
-
Y (non-differentiable) : T
-
Output tensor
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **Selu-22** - - Selu takes one input data (Tensor) and produces one output data - (Tensor) where the scaled exponential linear unit function, - `y = gamma * (alpha * e^x - alpha) for x <= 0`, `y = gamma * x for x > 0`, - is applied to the tensor elementwise. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
alpha : float (default is 1.67326)
-
Coefficient of SELU, defaulting to 1.67326319217681884765625 (i.e., the float32 approximation of 1.6732632423543772848170429916717).
-
gamma : float (default is 1.0507)
-
Coefficient of SELU, defaulting to 1.05070102214813232421875 (i.e., the float32 approximation of 1.0507009873554804934193349852946).
-
- -#### Inputs - -
-
X (differentiable) : T
-
Input tensor
-
- -#### Outputs - -
-
Y (differentiable) : T
-
Output tensor
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **Sin-22** - - Calculates the sine of the given input tensor, element-wise. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Inputs - -
-
input (differentiable) : T
-
Input tensor
-
- -#### Outputs - -
-
output (differentiable) : T
-
The sine of the input tensor computed element-wise
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **Sinh-22** - - Calculates the hyperbolic sine of the given input tensor element-wise. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Inputs - -
-
input (differentiable) : T
-
Input tensor
-
- -#### Outputs - -
-
output (differentiable) : T
-
The hyperbolic sine values of the input tensor computed element-wise
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **Softplus-22** - - Softplus takes one input data (Tensor) and produces one output data - (Tensor) where the softplus function, y = ln(exp(x) + 1), is applied to - the tensor elementwise. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Inputs - -
-
X (differentiable) : T
-
1D input tensor
-
- -#### Outputs - -
-
Y (differentiable) : T
-
1D output tensor
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **Softsign-22** - - Calculates the softsign (x/(1+|x|)) of the given input tensor element-wise. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Inputs - -
-
input (differentiable) : T
-
Input tensor
-
- -#### Outputs - -
-
output (differentiable) : T
-
The softsign (x/(1+|x|)) values of the input tensor computed element-wise
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **Tan-22** - - Calculates the tangent of the given input tensor, element-wise. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Inputs - -
-
input (differentiable) : T
-
Input tensor
-
- -#### Outputs - -
-
output (differentiable) : T
-
The tangent of the input tensor computed element-wise
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- -### **ThresholdedRelu-22** - - ThresholdedRelu takes one input data (Tensor) and produces one output data - (Tensor) where the rectified linear function, y = x for x > alpha, y = 0 otherwise, - is applied to the tensor elementwise. - -#### Version - -This version of the operator has been available since version 22 of the default ONNX operator set. - -#### Attributes - -
-
alpha : float (default is 1.0)
-
Threshold value
-
- -#### Inputs - -
-
X (differentiable) : T
-
Input tensor
-
- -#### Outputs - -
-
Y (differentiable) : T
-
Output tensor
-
- -#### Type Constraints - -
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
-
Constrain input and output types to float tensors.
-
- # ai.onnx.preview.training ## Version 1 of the 'ai.onnx.preview.training' operator set ### **ai.onnx.preview.training.Adagrad-1** diff --git a/docs/Operators.md b/docs/Operators.md index 42d81c252bd..372e42a4ac7 100644 --- a/docs/Operators.md +++ b/docs/Operators.md @@ -12,17 +12,17 @@ For an operator input/output's differentiability, it can be differentiable, |**Operator**|**Since version**|| |-|-|-| |Abs|13, 6, 1| -|Acos|22, 7| -|Acosh|22, 9| +|Acos|7| +|Acosh|9| |Add|14, 13, 7, 6, 1| |And|7, 1| |ArgMax|13, 12, 11, 1| |ArgMin|13, 12, 11, 1| -|Asin|22, 7| -|Asinh|22, 9| -|Atan|22, 7| -|Atanh|22, 9| -|AveragePool|22, 19, 11, 10, 7, 1| +|Asin|7| +|Asinh|9| +|Atan|7| +|Atanh|9| +|AveragePool|19, 11, 10, 7, 1| |BatchNormalization|15, 14, 9, 7, 6, 1| |BitShift|11| |BitwiseAnd|18| @@ -37,63 +37,63 @@ For an operator input/output's differentiability, it can be differentiable, |ConcatFromSequence|11| |Constant|21, 19, 13, 12, 11, 9, 1| |ConstantOfShape|21, 20, 9| -|Conv|22, 11, 1| +|Conv|11, 1| |ConvInteger|10| -|ConvTranspose|22, 11, 1| -|Cos|22, 7| -|Cosh|22, 9| +|ConvTranspose|11, 1| +|Cos|7| +|Cosh|9| |CumSum|14, 11| |DFT|20, 17| -|DeformConv|22, 19| +|DeformConv|19| |DepthToSpace|13, 11, 1| |DequantizeLinear|21, 19, 13, 10| -|Det|22, 11| +|Det|11| |Div|14, 13, 7, 6, 1| -|Dropout|22, 13, 12, 10, 7, 6, 1| +|Dropout|13, 12, 10, 7, 6, 1| |Einsum|12| |Equal|19, 13, 11, 7, 1| |Erf|13, 9| |Exp|13, 6, 1| |Expand|13, 8| -|EyeLike|22, 9| +|EyeLike|9| |Flatten|21, 13, 11, 9, 1| |Floor|13, 6, 1| -|GRU|22, 14, 7, 3, 1| +|GRU|14, 7, 3, 1| |Gather|13, 11, 1| |GatherElements|13, 11| |GatherND|13, 12, 11| |Gemm|13, 11, 9, 7, 6, 1| -|GlobalAveragePool|22, 1| -|GlobalLpPool|22, 2, 1| -|GlobalMaxPool|22, 1| +|GlobalAveragePool|1| +|GlobalLpPool|2, 1| +|GlobalMaxPool|1| |Greater|13, 9, 7, 1| -|GridSample|22, 20, 16| +|GridSample|20, 16| |Hardmax|13, 11, 1| |Identity|21, 19, 16, 14, 13, 1| |If|21, 19, 16, 13, 11, 1| |ImageDecoder|20| -|InstanceNormalization|22, 6, 1| 
+|InstanceNormalization|6, 1| |IsInf|20, 10| |IsNaN|20, 13, 9| |LRN|13, 1| -|LSTM|22, 14, 7, 1| +|LSTM|14, 7, 1| |Less|13, 9, 7, 1| |Log|13, 6, 1| |Loop|21, 19, 16, 13, 11, 1| -|LpNormalization|22, 1| -|LpPool|22, 18, 11, 2, 1| +|LpNormalization|1| +|LpPool|18, 11, 2, 1| |MatMul|13, 9, 1| |MatMulInteger|10| |Max|13, 12, 8, 6, 1| -|MaxPool|22, 12, 11, 10, 8, 1| -|MaxRoiPool|22, 1| -|MaxUnpool|22, 11, 9| +|MaxPool|12, 11, 10, 8, 1| +|MaxRoiPool|1| +|MaxUnpool|11, 9| |Mean|13, 8, 6, 1| |MelWeightMatrix|17| |Min|13, 12, 8, 6, 1| |Mod|13, 10| |Mul|14, 13, 7, 6, 1| -|Multinomial|22, 7| +|Multinomial|7| |Neg|13, 6, 1| |NonMaxSuppression|11, 10| |NonZero|13, 9| @@ -108,11 +108,11 @@ For an operator input/output's differentiability, it can be differentiable, |QLinearConv|10| |QLinearMatMul|21, 10| |QuantizeLinear|21, 19, 13, 10| -|RNN|22, 14, 7, 1| -|RandomNormal|22, 1| -|RandomNormalLike|22, 1| -|RandomUniform|22, 1| -|RandomUniformLike|22, 1| +|RNN|14, 7, 1| +|RandomNormal|1| +|RandomNormalLike|1| +|RandomUniform|1| +|RandomUniformLike|1| |Reciprocal|13, 6, 1| |ReduceMax|20, 18, 13, 12, 11, 1| |ReduceMean|18, 13, 11, 1| @@ -123,8 +123,8 @@ For an operator input/output's differentiability, it can be differentiable, |Reshape|21, 19, 14, 13, 5, 1| |Resize|19, 18, 13, 11, 10| |ReverseSequence|10| -|RoiAlign|22, 16, 10| -|Round|22, 11| +|RoiAlign|16, 10| +|Round|11| |STFT|17| |Scan|21, 19, 16, 11, 9, 8| |Scatter (deprecated)|11, 9| @@ -139,8 +139,8 @@ For an operator input/output's differentiability, it can be differentiable, |Shape|21, 19, 15, 13, 1| |Sigmoid|13, 6, 1| |Sign|13, 9| -|Sin|22, 7| -|Sinh|22, 9| +|Sin|7| +|Sinh|9| |Size|21, 19, 13, 1| |Slice|13, 11, 10, 1| |SpaceToDepth|13, 1| @@ -153,7 +153,7 @@ For an operator input/output's differentiability, it can be differentiable, |StringSplit|20| |Sub|14, 13, 7, 6, 1| |Sum|13, 8, 6, 1| -|Tan|22, 7| +|Tan|7| |Tanh|13, 6, 1| |TfIdfVectorizer|9| |Tile|13, 6, 1| @@ -167,28 +167,28 @@ For an operator input/output's 
differentiability, it can be differentiable, |Xor|7, 1| |**Function**|**Since version**|**Function version**| |AffineGrid|20|20| -|Bernoulli|22, 15|22| +|Bernoulli|15|15| |BlackmanWindow|17|17| |CastLike|21, 19, 15|21| |Celu|12|12| |CenterCropPad|18|18| |Clip|13, 12, 11, 6, 1|13| |DynamicQuantizeLinear|11|11| -|Elu|22, 6, 1|18| +|Elu|6, 1|18| |Gelu|20|20| |GreaterOrEqual|16, 12|16| |GroupNormalization|21, 18|21| |HammingWindow|17|17| |HannWindow|17|17| -|HardSigmoid|22, 6, 1|18| -|HardSwish|22, 14|22| +|HardSigmoid|6, 1|18| +|HardSwish|14|14| |LayerNormalization|17|17, 18| |LeakyRelu|16, 6, 1|16| |LessOrEqual|16, 12|16| |LogSoftmax|13, 11, 1|13, 18| |MeanVarianceNormalization|13, 9|13, 18| -|Mish|22, 18|22| -|NegativeLogLikelihoodLoss|22, 13, 12|22| +|Mish|18|18| +|NegativeLogLikelihoodLoss|13, 12|13| |PRelu|16, 9, 7, 6, 1|16| |Range|11|11| |ReduceL1|18, 13, 11, 1|18| @@ -197,14 +197,14 @@ For an operator input/output's differentiability, it can be differentiable, |ReduceLogSumExp|18, 13, 11, 1|18| |ReduceSumSquare|18, 13, 11, 1|18| |Relu|14, 13, 6, 1|18| -|Selu|22, 6, 1|18| +|Selu|6, 1|18| |SequenceMap|17|17| |Shrink|9|18| |Softmax|13, 11, 1|13, 18| |SoftmaxCrossEntropyLoss|13, 12|13| -|Softplus|22, 1|18| -|Softsign|22, 1|18| -|ThresholdedRelu|22, 10|18| +|Softplus|1|18| +|Softsign|1|18| +|ThresholdedRelu|10|18| ### ai.onnx.preview.training |**Operator**|**Since version**|| @@ -277,7 +277,6 @@ expect(node, inputs=[x], outputs=[y], name="test_abs") ```python # SPDX-License-Identifier: Apache-2.0 -from __future__ import annotations import numpy as np @@ -296,9 +295,7 @@ def abs(input: np.ndarray) -> np.ndarray: # noqa: A001 #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator: 7 +This version of the operator has been available since version 7 of the default ONNX operator set. #### Inputs @@ -317,7 +314,7 @@ Other versions of this operator: 7 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -352,9 +349,7 @@ expect(node, inputs=[x], outputs=[y], name="test_acos") #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator: 9 +This version of the operator has been available since version 9 of the default ONNX operator set. #### Inputs @@ -373,7 +368,7 @@ Other versions of this operator: 9 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -1402,9 +1397,7 @@ expect( #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator: 7 +This version of the operator has been available since version 7 of the default ONNX operator set. #### Inputs @@ -1423,7 +1416,7 @@ Other versions of this operator: 7 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -1458,9 +1451,7 @@ expect(node, inputs=[x], outputs=[y], name="test_asin") #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator: 9 +This version of the operator has been available since version 9 of the default ONNX operator set. #### Inputs @@ -1479,7 +1470,7 @@ Other versions of this operator: 9 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -1514,9 +1505,7 @@ expect(node, inputs=[x], outputs=[y], name="test_asinh") #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator: 7 +This version of the operator has been available since version 7 of the default ONNX operator set. #### Inputs @@ -1535,7 +1524,7 @@ Other versions of this operator: 7 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -1570,9 +1559,7 @@ expect(node, inputs=[x], outputs=[y], name="test_atan") #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator: 9 +This version of the operator has been available since version 9 of the default ONNX operator set. #### Inputs @@ -1591,7 +1578,7 @@ Other versions of this operator: 9 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -1657,9 +1644,9 @@ expect(node, inputs=[x], outputs=[y], name="test_atanh") #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. +This version of the operator has been available since version 19 of the default ONNX operator set. -Other versions of this operator: 1, 7, 10, 11, 19 +Other versions of this operator: 1, 7, 10, 11 #### Attributes @@ -1697,7 +1684,7 @@ Other versions of this operator: 1, -
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -2623,9 +2610,7 @@ expect( #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator:
15 +This version of the operator has been available since version 15 of the default ONNX operator set. #### Attributes @@ -2653,9 +2638,9 @@ Other versions of this operator: 15 #### Type Constraints
-
T1 : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T1 : tensor(float16), tensor(float), tensor(double)
Constrain input types to float tensors.
-
T2 : tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(bool)
+
T2 : tensor(float16), tensor(float), tensor(double), tensor(bfloat16), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bool)
Constrain output types to all numeric tensors and bool tensors.
@@ -3888,9 +3873,9 @@ Other versions of this operator: 15, -
T1 : tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz), tensor(uint4), tensor(int4)
+
T1 : tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string), tensor(bfloat16), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz), tensor(uint4), tensor(int4)
Constrain input types. Casting from complex is not supported.
-
T2 : tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz), tensor(uint4), tensor(int4)
+
T2 : tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool), tensor(string), tensor(bfloat16), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz), tensor(uint4), tensor(int4)
Constrain output types. Casting to complex is not supported.
@@ -5281,7 +5266,7 @@ Other versions of this operator:
1, concat ```python -test_cases: dict[str, Sequence[Any]] = { +test_cases: Dict[str, Sequence[Any]] = { "1d": ([1, 2], [3, 4]), "2d": ([[1, 2], [3, 4]], [[5, 6], [7, 8]]), "3d": ( @@ -5561,9 +5546,9 @@ expect(node, inputs=[x], outputs=[y], name="test_constantofshape_int_zeros") #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. +This version of the operator has been available since version 11 of the default ONNX operator set. -Other versions of this operator: 1, 11 +Other versions of this operator: 1 #### Attributes @@ -5603,7 +5588,7 @@ Other versions of this operator: 1, 1, 11 +Other versions of this operator: 1 #### Attributes @@ -6065,7 +6050,7 @@ Other versions of this operator: 1, < #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -6506,9 +6491,7 @@ expect(node, inputs=[x, W], outputs=[y], name="test_convtranspose_pads") #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator: 7 +This version of the operator has been available since version 7 of the default ONNX operator set. #### Inputs @@ -6527,7 +6510,7 @@ Other versions of this operator: 7 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -6562,9 +6545,7 @@ expect(node, inputs=[x], outputs=[y], name="test_cos") #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator: 9 +This version of the operator has been available since version 9 of the default ONNX operator set. #### Inputs @@ -6583,7 +6564,7 @@ Other versions of this operator: 9 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -6960,9 +6941,7 @@ expect( #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator: 19 +This version of the operator has been available since version 19 of the default ONNX operator set. #### Attributes @@ -7006,7 +6985,7 @@ Other versions of this operator: 19 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -7700,7 +7679,7 @@ node = onnx.helper.make_node( # scalar zero point and scale x = make_tensor("x", TensorProto.INT4, [5], [0, 1, 7, -4, -8]) x_scale = np.float32(2) -x_zero_point = make_tensor("x_zero_point", TensorProto.INT4, (1,), [1]) +x_zero_point = make_tensor("zero_point", TensorProto.INT4, (1,), [1]) y = np.array([-2, 0, 12, -10, -18], dtype=np.float32) expect( @@ -7754,7 +7733,7 @@ node = onnx.helper.make_node( # scalar zero point and scale x = make_tensor("x", TensorProto.UINT4, [5], [0, 1, 7, 10, 15]) x_scale = np.float32(2) -x_zero_point = make_tensor("x_zero_point", TensorProto.UINT4, (1,), [1]) +x_zero_point = make_tensor("zero_point", TensorProto.UINT4, (1,), [1]) y = np.array([-2, 0, 12, 18, 28], dtype=np.float32) expect( @@ -7778,9 +7757,7 @@ expect( #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator: 11 +This version of the operator has been available since version 11 of the default ONNX operator set. #### Inputs @@ -7799,7 +7776,7 @@ Other versions of this operator: 11 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to floating-point tensors.
@@ -7949,9 +7926,9 @@ expect(node, inputs=[x, y], outputs=[z], name="test_div_bcast") #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. +This version of the operator has been available since version 13 of the default ONNX operator set. -Other versions of this operator: 1, 6, 7, 10, 12, 13 +Other versions of this operator: 1, 6, 7, 10, 12 #### Attributes @@ -7983,9 +7960,9 @@ Other versions of this operator: 1, -
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz)
+
T : tensor(float16), tensor(float), tensor(double), tensor(bfloat16)
Constrain input and output types to float tensors.
-
T1 : tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz)
+
T1 : tensor(float16), tensor(float), tensor(double)
Constrain input 'ratio' types to float tensors.
T2 : tensor(bool)
Constrain output 'mask' types to boolean tensors.
@@ -8535,9 +8512,9 @@ expect(node, inputs=[X], outputs=[Y], name="test_einsum_transpose") #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. +This version of the operator has been available since version 6 of the default ONNX operator set. -Other versions of this operator:
1, 6 +Other versions of this operator: 1 #### Attributes @@ -8563,7 +8540,7 @@ Other versions of this operator: 1, 1, 9 +This version of the operator has been available since version 9 of the default ONNX operator set. #### Attributes @@ -8981,9 +8956,9 @@ Other versions of this operator: 9 #### Type Constraints
-
T1 : tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(bool)
+
T1 : tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool)
Constrain input types. Strings and complex are not supported.
-
T2 : tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(bool)
+
T2 : tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(bool)
Constrain output types. Strings and complex are not supported.
@@ -9280,9 +9255,9 @@ expect(node, inputs=[x], outputs=[y], name="test_floor") #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. +This version of the operator has been available since version 14 of the default ONNX operator set. -Other versions of this operator: 1, 3, 7, 14 +Other versions of this operator: 1, 3, 7 #### Attributes @@ -9334,7 +9309,7 @@ Other versions of this operator: 1, 1 +This version of the operator has been available since version 1 of the default ONNX operator set. #### Inputs @@ -10477,7 +10450,7 @@ Other versions of this operator: 1 -
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -10536,9 +10509,9 @@ expect(node, inputs=[x], outputs=[y], name="test_globalaveragepool_precomputed") #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. +This version of the operator has been available since version 2 of the default ONNX operator set. -Other versions of this operator: 1, 2 +Other versions of this operator: 1 #### Attributes @@ -10577,9 +10550,7 @@ Other versions of this operator: 1, 1 +This version of the operator has been available since version 1 of the default ONNX operator set. #### Inputs @@ -10598,7 +10569,7 @@ Other versions of this operator: 1 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -10828,9 +10799,9 @@ Other versions of this operator: 12 #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. +This version of the operator has been available since version 20 of the default ONNX operator set. -Other versions of this operator: 16, 20 +Other versions of this operator: 16 #### Attributes @@ -10862,9 +10833,9 @@ Other versions of this operator: 16, -
T1 : tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128)
+
T1 : tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128)
Constrain input `X` and output `Y` types to all tensor types.
-
T2 : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T2 : tensor(float16), tensor(float), tensor(double)
Constrain grid types to float tensors.
@@ -11785,9 +11756,9 @@ expect(node, inputs=[size], outputs=[y], name="test_hannwindow_symmetric") #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. +This version of the operator has been available since version 6 of the default ONNX operator set. -Other versions of this operator:
1, 6 +Other versions of this operator: 1 #### Attributes @@ -11815,7 +11786,7 @@ Other versions of this operator: 1, -
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -11869,9 +11840,7 @@ expect(node, inputs=[x], outputs=[y], name="test_hardsigmoid_default") #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator:
14 +This version of the operator has been available since version 14 of the default ONNX operator set. #### Inputs @@ -11890,7 +11859,7 @@ Other versions of this operator: 14 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -12738,9 +12707,9 @@ expect( #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. +This version of the operator has been available since version 6 of the default ONNX operator set. -Other versions of this operator: 1, 6 +Other versions of this operator: 1 #### Attributes @@ -12770,7 +12739,7 @@ Other versions of this operator: #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -13191,9 +13160,9 @@ expect(node, inputs=[x], outputs=[y], name="test_lrn") #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. +This version of the operator has been available since version 14 of the default ONNX operator set. -Other versions of this operator:
1, 7, 14 +Other versions of this operator: 1, 7 #### Attributes @@ -13251,7 +13220,7 @@ Other versions of this operator: 1, 1 +This version of the operator has been available since version 1 of the default ONNX operator set. #### Attributes @@ -14765,7 +14732,7 @@ Other versions of this operator: 1 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -14798,9 +14765,9 @@ Other versions of this operator: 1 #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. +This version of the operator has been available since version 18 of the default ONNX operator set. -Other versions of this operator: 1, 2, 11, 18 +Other versions of this operator: 1, 2, 11 #### Attributes @@ -14838,7 +14805,7 @@ Other versions of this operator: 1, -
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -15457,9 +15424,9 @@ for op_dtype in all_numeric_dtypes: #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. +This version of the operator has been available since version 12 of the default ONNX operator set. -Other versions of this operator:
1, 8, 10, 11, 12 +Other versions of this operator: 1, 8, 10, 11 #### Attributes @@ -15499,7 +15466,7 @@ Other versions of this operator: 1, -
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(uint8)
+
T : tensor(float16), tensor(float), tensor(double), tensor(int8), tensor(uint8)
Constrain input and output types to float and 8 bit tensors.
I : tensor(int64)
Constrain index tensor to int64
@@ -16326,9 +16293,7 @@ expect( #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator:
1 +This version of the operator has been available since version 1 of the default ONNX operator set. #### Attributes @@ -16358,7 +16323,7 @@ Other versions of this operator: 1 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -16386,9 +16351,9 @@ Other versions of this operator: 1 #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. +This version of the operator has been available since version 11 of the default ONNX operator set. -Other versions of this operator: 9, 11 +Other versions of this operator: 9 #### Attributes @@ -16422,7 +16387,7 @@ Other versions of this operator: 9, -
T1 : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T1 : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
T2 : tensor(int64)
Constrain index tensor to int64
@@ -16915,9 +16880,7 @@ for op_dtype in all_numeric_dtypes: #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator:
18 +This version of the operator has been available since version 18 of the default ONNX operator set. #### Inputs @@ -16936,7 +16899,7 @@ Other versions of this operator: 18 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input X and output types to float tensors.
@@ -17353,9 +17316,7 @@ expect(node, inputs=[x, y], outputs=[z], name="test_mul_bcast") #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator: 7 +This version of the operator has been available since version 7 of the default ONNX operator set. #### Attributes @@ -17385,7 +17346,7 @@ Other versions of this operator: 7 #### Type Constraints
-
T1 : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T1 : tensor(float16), tensor(float), tensor(double)
Constrain input types to float tensors.
T2 : tensor(int32), tensor(int64)
Constrain output types to integral tensors.
@@ -17556,9 +17517,9 @@ expect(node, inputs=[x], outputs=[y], name="test_neg") #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. +This version of the operator has been available since version 13 of the default ONNX operator set. -Other versions of this operator: 12, 13 +Other versions of this operator: 12 #### Attributes @@ -17590,7 +17551,7 @@ Other versions of this operator: 10
axis : int (default is 1)
-
(Optional) The axis of the dequantizing dimension of the input tensor. Used only for per-axis and blocked quantization. Negative value means counting dimensions from the back. Accepted range is `[-r, r-1]` where `r = rank(input)`. When the rank of the input is 1, per-tensor quantization is applied, rendering the axis unnecessary in this scenario.
+
(Optional) The axis of the dequantizing dimension of the input tensor. Used for per-axis and blocked quantization. Negative value means counting dimensions from the back. Accepted range is `[-r, r-1]` where `r = rank(input)`.
block_size : int (default is 0)
(Optional) The size of the quantization block (number of times every scale is replicated). Used only for blocked quantization. The block size is a positive integer. Given `x` shape `(D0, ..., Di, ..., Dn)`, `y_scale` shape `(S0, ... Si, ...Sn)` and `axis=i`, the accepted range is `[ceil(Di/Si), ceil(Di/(Si-1))-1]`
output_dtype : int (default is 0)
@@ -20532,7 +20493,7 @@ node = onnx.helper.make_node( x = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32) y_scale = np.float32(2) -y_zero_point = make_tensor("y_zero_point", TensorProto.FLOAT8E4M3FN, [1], [0]) +y_zero_point = make_tensor("zero_point", TensorProto.FLOAT8E4M3FN, [1], [0]) y = make_tensor("y", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, 96]) expect( @@ -20558,7 +20519,7 @@ node = onnx.helper.make_node( x = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32) y_scale = np.float32(2) -y_zero_point = make_tensor("y_zero_point", TensorProto.FLOAT8E5M2, [1], [0.0]) +y_zero_point = make_tensor("zero_point", TensorProto.FLOAT8E5M2, [1], [0.0]) y = make_tensor("y", TensorProto.FLOAT8E5M2, [5], [0, 0.5, 1, 49152, 96]) expect( @@ -20657,7 +20618,7 @@ x = np.array( y_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32) y_zero_point = make_tensor( - "y_zero_point", TensorProto.INT4, y_scale.shape, np.ones_like(y_scale) + "zero_point", TensorProto.INT4, y_scale.shape, np.ones_like(y_scale) ) y = make_tensor( "y", TensorProto.INT4, x.shape, [1, 2, 3, 5, -8, -6, 3, 4, 4, 5, 5, 7] @@ -20777,7 +20738,7 @@ x = np.array( y_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32) y_zero_point = make_tensor( - "y_zero_point", TensorProto.UINT4, y_scale.shape, np.ones_like(y_scale) + "zero_point", TensorProto.UINT4, y_scale.shape, np.ones_like(y_scale) ) y = make_tensor( "y", TensorProto.UINT4, x.shape, [1, 2, 3, 5, -1, -1, 3, 4, 4, 5, 5, 11] @@ -20839,9 +20800,9 @@ expect( #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. +This version of the operator has been available since version 14 of the default ONNX operator set. -Other versions of this operator: 1, 7, 14 +Other versions of this operator: 1, 7 #### Attributes @@ -20891,7 +20852,7 @@ Other versions of this operator: 1, 1 +This version of the operator has been available since version 1 of the default ONNX operator set. 
#### Attributes @@ -21092,7 +21051,7 @@ Other versions of this operator: 1 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain output types to float tensors.
@@ -21109,9 +21068,7 @@ Other versions of this operator: 1 #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator: 1 +This version of the operator has been available since version 1 of the default ONNX operator set. #### Attributes @@ -21143,9 +21100,9 @@ Other versions of this operator: 1 #### Type Constraints
-
T1 : tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128)
+
T1 : tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128)
Constrain to any tensor type. If the dtype attribute is not provided this must be a valid output type.
-
T2 : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T2 : tensor(float16), tensor(float), tensor(double)
Constrain output types to float tensors.
@@ -21161,9 +21118,7 @@ Other versions of this operator: 1 #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator: 1 +This version of the operator has been available since version 1 of the default ONNX operator set. #### Attributes @@ -21193,7 +21148,7 @@ Other versions of this operator: 1 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain output types to float tensors.
@@ -21210,9 +21165,7 @@ Other versions of this operator: 1 #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator: 1 +This version of the operator has been available since version 1 of the default ONNX operator set. #### Attributes @@ -21244,9 +21197,9 @@ Other versions of this operator: 1 -
T1 : tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(bfloat16), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128)
+
T1 : tensor(uint8), tensor(uint16), tensor(uint32), tensor(uint64), tensor(int8), tensor(int16), tensor(int32), tensor(int64), tensor(float16), tensor(float), tensor(double), tensor(string), tensor(bool), tensor(complex64), tensor(complex128)
Constrain to any tensor type. If the dtype attribute is not provided this must be a valid output type.
-
T2 : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T2 : tensor(float16), tensor(float), tensor(double)
Constrain output types to float tensors.
@@ -26638,9 +26591,9 @@ expect( #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. +This version of the operator has been available since version 16 of the default ONNX operator set. -Other versions of this operator: 10, 16 +Other versions of this operator: 10 #### Attributes @@ -26680,7 +26633,7 @@ Other versions of this operator: 10, -
T1 : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T1 : tensor(float16), tensor(float), tensor(double)
Constrain types to float tensors.
T2 : tensor(int64)
Constrain types to int tensors.
@@ -27025,9 +26978,7 @@ expect( #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator:
11 +This version of the operator has been available since version 11 of the default ONNX operator set. #### Inputs @@ -27046,7 +26997,7 @@ Other versions of this operator: 11 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -28272,9 +28223,9 @@ expect( #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. +This version of the operator has been available since version 6 of the default ONNX operator set. -Other versions of this operator: 1, 6 +Other versions of this operator: 1 #### Attributes @@ -28302,7 +28253,7 @@ Other versions of this operator: 1, 7 +This version of the operator has been available since version 7 of the default ONNX operator set. #### Inputs @@ -29349,7 +29298,7 @@ Other versions of this operator: 7 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -29384,9 +29333,7 @@ expect(node, inputs=[x], outputs=[y], name="test_sin") #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator: 9 +This version of the operator has been available since version 9 of the default ONNX operator set. #### Inputs @@ -29405,7 +29352,7 @@ Other versions of this operator: 9 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -31315,9 +31262,7 @@ expect( #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator: 1 +This version of the operator has been available since version 1 of the default ONNX operator set. #### Inputs @@ -31336,7 +31281,7 @@ Other versions of this operator: 1 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -31373,9 +31318,7 @@ expect(node, inputs=[x], outputs=[y], name="test_softplus") #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator: 1 +This version of the operator has been available since version 1 of the default ONNX operator set. #### Inputs @@ -31394,7 +31337,7 @@ Other versions of this operator: 1 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -32994,9 +32937,7 @@ expect( #### Version -This version of the operator has been available since version 22 of the default ONNX operator set. - -Other versions of this operator: 7 +This version of the operator has been available since version 7 of the default ONNX operator set. #### Inputs @@ -33015,7 +32956,7 @@ Other versions of this operator: 7 #### Type Constraints
-
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
@@ -33071,7 +33012,7 @@ Other versions of this operator: 1, 10 +This version of the operator has been available since version 10 of the default ONNX operator set. #### Attributes @@ -33463,7 +33402,7 @@ Other versions of this operator: 10 -
T : tensor(bfloat16), tensor(float16), tensor(float), tensor(double)
+
T : tensor(float16), tensor(float), tensor(double)
Constrain input and output types to float tensors.
diff --git a/docs/TestCoverage.md b/docs/TestCoverage.md index bfdddc1f77b..124122ae2d6 100644 --- a/docs/TestCoverage.md +++ b/docs/TestCoverage.md @@ -3773,7 +3773,7 @@ There are 1 test cases, listed as following: concat ```python -test_cases: dict[str, Sequence[Any]] = { +test_cases: Dict[str, Sequence[Any]] = { "1d": ([1, 2], [3, 4]), "2d": ([[1, 2], [3, 4]], [[5, 6], [7, 8]]), "3d": ( @@ -5449,7 +5449,7 @@ node = onnx.helper.make_node( # scalar zero point and scale x = make_tensor("x", TensorProto.INT4, [5], [0, 1, 7, -4, -8]) x_scale = np.float32(2) -x_zero_point = make_tensor("x_zero_point", TensorProto.INT4, (1,), [1]) +x_zero_point = make_tensor("zero_point", TensorProto.INT4, (1,), [1]) y = np.array([-2, 0, 12, -10, -18], dtype=np.float32) expect( @@ -5499,7 +5499,7 @@ node = onnx.helper.make_node( # scalar zero point and scale x = make_tensor("x", TensorProto.UINT4, [5], [0, 1, 7, 10, 15]) x_scale = np.float32(2) -x_zero_point = make_tensor("x_zero_point", TensorProto.UINT4, (1,), [1]) +x_zero_point = make_tensor("zero_point", TensorProto.UINT4, (1,), [1]) y = np.array([-2, 0, 12, 18, 28], dtype=np.float32) expect( @@ -9665,7 +9665,7 @@ node = onnx.helper.make_node( ) trip_count = np.array(5).astype(np.int64) -seq_empty: list[Any] = [] +seq_empty: List[Any] = [] seq_res = [x[: int(i)] for i in x] cond = np.array(1).astype(bool) expect( @@ -9860,7 +9860,7 @@ node = onnx.helper.make_node( trip_count = np.array(5).astype(np.int64) cond = np.array(1).astype(bool) seq_res = compute_loop_outputs(x, [x0], trip_count) -opt_seq_in: list[Any] = [x0] +opt_seq_in: List[Any] = [x0] expect( node, inputs=[trip_count, cond, opt_seq_in], @@ -13955,7 +13955,7 @@ node = onnx.helper.make_node( x = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32) y_scale = np.float32(2) -y_zero_point = make_tensor("y_zero_point", TensorProto.FLOAT8E4M3FN, [1], [0]) +y_zero_point = make_tensor("zero_point", TensorProto.FLOAT8E4M3FN, [1], [0]) y = make_tensor("y", 
TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, 96]) expect( @@ -13979,7 +13979,7 @@ node = onnx.helper.make_node( x = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32) y_scale = np.float32(2) -y_zero_point = make_tensor("y_zero_point", TensorProto.FLOAT8E5M2, [1], [0.0]) +y_zero_point = make_tensor("zero_point", TensorProto.FLOAT8E5M2, [1], [0.0]) y = make_tensor("y", TensorProto.FLOAT8E5M2, [5], [0, 0.5, 1, 49152, 96]) expect( @@ -14074,7 +14074,7 @@ x = np.array( y_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32) y_zero_point = make_tensor( - "y_zero_point", TensorProto.INT4, y_scale.shape, np.ones_like(y_scale) + "zero_point", TensorProto.INT4, y_scale.shape, np.ones_like(y_scale) ) y = make_tensor( "y", TensorProto.INT4, x.shape, [1, 2, 3, 5, -8, -6, 3, 4, 4, 5, 5, 7] @@ -14188,7 +14188,7 @@ x = np.array( y_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32) y_zero_point = make_tensor( - "y_zero_point", TensorProto.UINT4, y_scale.shape, np.ones_like(y_scale) + "zero_point", TensorProto.UINT4, y_scale.shape, np.ones_like(y_scale) ) y = make_tensor( "y", TensorProto.UINT4, x.shape, [1, 2, 3, 5, -1, -1, 3, 4, 4, 5, 5, 11] diff --git a/onnx/backend/test/case/node/swish.py b/onnx/backend/test/case/node/swish.py new file mode 100644 index 00000000000..dcb751a9b15 --- /dev/null +++ b/onnx/backend/test/case/node/swish.py @@ -0,0 +1,29 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +def swish(x: np.ndarray, alpha: np.float16) -> np.ndarray: + return x * 1 / (1 + np.exp(np.negative(x * alpha))) + + +class Swish(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Swish", + inputs=["x"], + outputs=["y"], + ) + + x = np.array([3, 4, 5]).astype(np.float32) + y = swish(x, alpha=1.0) + + expect(node, inputs=[x], 
outputs=[y], name="test_swish") diff --git a/onnx/backend/test/data/node/test_acos/model.onnx b/onnx/backend/test/data/node/test_acos/model.onnx index 206ab391191916d449768262724d6fbc44399a16..cc7c695f5588ed43639e0722d75a83299f51f716 100644 GIT binary patch delta 10 RcmYdJX5wI;$dtgy4gd?o0nY#c delta 10 RcmYdJX5!$Q$dte+1^^5E0p$Py diff --git a/onnx/backend/test/data/node/test_acos_example/model.onnx b/onnx/backend/test/data/node/test_acos_example/model.onnx index 8b29942f13fdea61b8ebcaa7ded79ed50712728c..c986dd6ba90992524b98a16775de129aa618901f 100644 GIT binary patch delta 10 Rcma!!X5wI;$P~fI4gd=i0k;4E delta 10 Rcma!!X5!$Q$P~dS1^^380nGpa diff --git a/onnx/backend/test/data/node/test_acosh/model.onnx b/onnx/backend/test/data/node/test_acosh/model.onnx index 916010c59b8294b727797c6025fd15302404717c..9651625d11c26d371c54d933778912aa4499995b 100644 GIT binary patch delta 10 RcmYdIW#V9&$dts$2>=WC0oVWl delta 10 RcmYdIW#ZtP$dtq=1^^5w0qXz& diff --git a/onnx/backend/test/data/node/test_acosh_example/model.onnx b/onnx/backend/test/data/node/test_acosh_example/model.onnx index 1fbfea14d9c8346f9f6a25e2415bc29eb155b73a..42eb9a1641a6f19797462e4c0643e15df0c409d8 100644 GIT binary patch delta 10 Rcma!zW#V9&$P~rM2>=U60l)wN delta 10 Rcma!zW#ZtP$P~pW1^^3q0n-2g diff --git a/onnx/backend/test/data/node/test_asin/model.onnx b/onnx/backend/test/data/node/test_asin/model.onnx index e7d9804878429f691543d2d886c59fc5c6a87fdd..a7caca1b1a5905f86e96e7c94f458978ac96f55e 100644 GIT binary patch delta 10 RcmYdJX5wI;$dtgy4gd?o0nY#c delta 10 RcmYdJX5!$Q$dte+1^^5E0p$Py diff --git a/onnx/backend/test/data/node/test_asin_example/model.onnx b/onnx/backend/test/data/node/test_asin_example/model.onnx index 22ddea64b64239018f4ea8ff5ccdd48823ba3be1..a20be5821295924dc54cfb4101f826ffa5c6cd09 100644 GIT binary patch delta 10 Rcma!!X5wI;$P~fI4gd=i0k;4E delta 10 Rcma!!X5!$Q$P~dS1^^380nGpa diff --git a/onnx/backend/test/data/node/test_asinh/model.onnx b/onnx/backend/test/data/node/test_asinh/model.onnx index 
96d1db46bdffb745c260aaf584df161ebcfa3170..922c0aef88e7865eb008384168a723c177004bc7 100644 GIT binary patch delta 10 RcmYdIW#V9&$dts$2>=WC0oVWl delta 10 RcmYdIW#ZtP$dtq=1^^5w0qXz& diff --git a/onnx/backend/test/data/node/test_asinh_example/model.onnx b/onnx/backend/test/data/node/test_asinh_example/model.onnx index eb4778849764a833435fbb51a65e100ec261876d..7d02941a6752f8e6d7b738bb11c22981cdf21cc5 100644 GIT binary patch delta 10 Rcma!zW#V9&$P~rM2>=U60l)wN delta 10 Rcma!zW#ZtP$P~pW1^^3q0n-2g diff --git a/onnx/backend/test/data/node/test_atan/model.onnx b/onnx/backend/test/data/node/test_atan/model.onnx index 4f2f5deee1db2b71326397d3e6dede319da7e122..7cd0e8e041693c6f36ae9defcf2c5ad23958aa8f 100644 GIT binary patch delta 10 RcmYdJX5wI;$dtgy4gd?o0nY#c delta 10 RcmYdJX5!$Q$dte+1^^5E0p$Py diff --git a/onnx/backend/test/data/node/test_atan_example/model.onnx b/onnx/backend/test/data/node/test_atan_example/model.onnx index bfe9894a60d1613e3d3edd52488e54d58d831714..7b9525ee935dac2709820f7d56dd3f608718ded0 100644 GIT binary patch delta 10 Rcma!!X5wI;$P~fI4gd=i0k;4E delta 10 Rcma!!X5!$Q$P~dS1^^380nGpa diff --git a/onnx/backend/test/data/node/test_atanh/model.onnx b/onnx/backend/test/data/node/test_atanh/model.onnx index a1db102d3e7c3fb11718e81f807604ddfdfb4033..3d1716d23a512d9ec9660f69629af9c2552a116a 100644 GIT binary patch delta 10 RcmYdIW#V9&$dts$2>=WC0oVWl delta 10 RcmYdIW#ZtP$dtq=1^^5w0qXz& diff --git a/onnx/backend/test/data/node/test_atanh_example/model.onnx b/onnx/backend/test/data/node/test_atanh_example/model.onnx index d0efa30625fce1d7a3678468ba5b586f47bf7bb9..4e6d72d6a750e45834d4bde0dc47441dd001ab9c 100644 GIT binary patch delta 10 Rcma!zW#V9&$P~rM2>=U60l)wN delta 10 Rcma!zW#ZtP$P~pW1^^3q0n-2g diff --git a/onnx/backend/test/data/node/test_averagepool_1d_default/model.onnx b/onnx/backend/test/data/node/test_averagepool_1d_default/model.onnx index 36f6b96a29b72520484a3c8b8dbc7b3333c2d89c..14c0309706c7bffb898a3490193b2877a2b5de77 100644 GIT binary patch delta 12 
TcmbQpIFXTwgL5KNAEPh;5wHSn delta 12 TcmbQpIFXTwgKHvFAEOum5xD|! diff --git a/onnx/backend/test/data/node/test_averagepool_2d_ceil/model.onnx b/onnx/backend/test/data/node/test_averagepool_2d_ceil/model.onnx index 168a69574f57201bfc0fdf78529c9d5aba53ba90..bb02ae58ca6dc045ee9ea61a7aab62368a61ace6 100644 GIT binary patch delta 12 TcmdnXxR;TMgL5L&E=FMh75oD6 delta 12 TcmdnXxR;TMgKHwwE=Dl`76k(J diff --git a/onnx/backend/test/data/node/test_averagepool_2d_default/model.onnx b/onnx/backend/test/data/node/test_averagepool_2d_default/model.onnx index 11a1b13ef759d46ceade4803416f70bab2667654..bd1efdecb29da0558795db8de0c3ea8d45285c0e 100644 GIT binary patch delta 12 TcmbQkIERsmgL5L&Oh#b<62k(H delta 12 TcmbQkIERsmgKHwwOhz#P63haU diff --git a/onnx/backend/test/data/node/test_averagepool_2d_dilations/model.onnx b/onnx/backend/test/data/node/test_averagepool_2d_dilations/model.onnx index ce3c86ed527fb2a459e7d0c12b45fc911981dd78..7de2313225ea1e6d2d1a341d9a3b0f3472bf20f0 100644 GIT binary patch delta 12 Tcmcb{c#V;XgL5L&Wkz8D7;OVQ delta 12 Tcmcb{c#V;XgKHwwWkxXo7yDgL5L&YDQrI6omrB delta 12 TcmZ3-xQ>yDgKHwwYDO^t6pjMO diff --git a/onnx/backend/test/data/node/test_averagepool_3d_default/model.onnx b/onnx/backend/test/data/node/test_averagepool_3d_default/model.onnx index 4d1bcc84eb0b05b239a3745c7e0989d3aeb1a0c1..638a15930cdd5abc52a2d7bd720e42d0ebbd03f8 100644 GIT binary patch delta 12 TcmZ3+xQvmBgL5L&Vn$&A6TJeg delta 12 TcmZ3+xQvmBgKHwwVn#6l6UG9t diff --git a/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_False/model.onnx b/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_False/model.onnx index 7a757250d57b038290686e598571e50941c68a15..a46212cff768a169c6279de261732b73ecfc5584 100644 GIT binary patch delta 13 UcmZ3_w4RBHgL5O34kM#502V3&Y5)KL delta 13 UcmZ3_w4RBHgKHy`4kM!&02VX?ZU6uP diff --git 
a/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_True/model.onnx b/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_True/model.onnx index 894e85fdd48c3e02945dd87d4b4d894494d65d89..2354fbfb7b18381024c4a1553a5d26ce59766eef 100644 GIT binary patch delta 13 UcmZ3-w2q01gL5O3HY1}j02U4cX8-^I delta 13 UcmZ3-w2q01gKHy`HY1}L02UYmYXATM diff --git a/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_False/model.onnx b/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_False/model.onnx index eafe7769f21a050b69a64257bd0c57916969e5b3..79a25a22420d73b1149d910989b2c28d29b43455 100644 GIT binary patch delta 13 UcmZ3_w4RBHgL5O34kM#502V3&Y5)KL delta 13 UcmZ3_w4RBHgKHy`4kM!&02VX?ZU6uP diff --git a/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_True/model.onnx b/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_True/model.onnx index 27f6fb1f595bd2678be7a6c5f0665a15d175debb..310be73f5384725693a70c9086fb64e4eda2b486 100644 GIT binary patch delta 13 UcmZ3-w2q01gL5O3HY1}j02U4cX8-^I delta 13 UcmZ3-w2q01gKHy`HY1}L02UYmYXATM diff --git a/onnx/backend/test/data/node/test_averagepool_3d_dilations_small/model.onnx b/onnx/backend/test/data/node/test_averagepool_3d_dilations_small/model.onnx index 883cdbefa603396efaa86a823a32e6f4af833bbc..7a36e26fce9bc3d5fa2e1bbd3b3b3b6d7158e7a9 100644 GIT binary patch delta 12 TcmaFG_==Hn+a delta 13 UcmdnOw1tU@gKHy`AtR$002aUkeEVrT_o{ delta 13 UcmX@ibeM^WgKHy`10$mt02pKfssI20 diff --git a/onnx/backend/test/data/node/test_bernoulli/model.onnx b/onnx/backend/test/data/node/test_bernoulli/model.onnx index 702c9960fd4579906c5ac7e04926532742cfdea1..47b791d688556421f70fa7a392a178e00ead5749 100644 GIT binary patch delta 10 Rcma!zW#ZtN$P~rM4*&~g0m=XX delta 10 
Rcma!zW#ZtP$P~pW1^^3q0n-2g diff --git a/onnx/backend/test/data/node/test_bernoulli_double/model.onnx b/onnx/backend/test/data/node/test_bernoulli_double/model.onnx index 929e4196e3bb931de8541a8006ee106a9984720e..f954a8dc57f900d89b4100a9f7b24767cf5ac6ab 100644 GIT binary patch delta 10 RcmXRaV&dSK$du2>4*(4_0to;B delta 10 RcmXRaV&dSM$du101^^940ulfK diff --git a/onnx/backend/test/data/node/test_bernoulli_double_expanded/model.onnx b/onnx/backend/test/data/node/test_bernoulli_double_expanded/model.onnx index 558e38225b0355325c4079ffce9b7157ec3555f6..fcf5060902691c96cd7bdc04977dbc5efd0726b1 100644 GIT binary patch delta 13 UcmdnTypNfQgJUDpZbn9a02$c>6aWAK delta 13 UcmdnTypNfQgKHzxZbn8i02%HB9RL6T diff --git a/onnx/backend/test/data/node/test_bernoulli_expanded/model.onnx b/onnx/backend/test/data/node/test_bernoulli_expanded/model.onnx index c510c81bf60d6b55982d726dde3859c11aa59225..785d12f46e8844b828436eebb26535324daaa94a 100644 GIT binary patch delta 13 UcmbQqJd>G;gJUDpG)6{#02R#wr2qf` delta 13 UcmbQqJd>G;gKHzxG)6`-02Sf_t^fc4 diff --git a/onnx/backend/test/data/node/test_bernoulli_seed/model.onnx b/onnx/backend/test/data/node/test_bernoulli_seed/model.onnx index ea69f01240e29ce6a26d1fcc8f7828d1fead8603..85f1b82b5e2cd64bd83a9d0f76094fd604de82dd 100644 GIT binary patch delta 10 RcmXRaV&dSK$du2>4*(4_0to;B delta 10 RcmXRaV&dSM$du101^^940ulfK diff --git a/onnx/backend/test/data/node/test_bernoulli_seed_expanded/model.onnx b/onnx/backend/test/data/node/test_bernoulli_seed_expanded/model.onnx index 3236cbfec81e686d5317f62fb01fc11197520789..e195a6ce93c7db0da36e3fc0d79e30ed56b4b811 100644 GIT binary patch delta 13 UcmX@Ye1w^agJUDpK}JS?02+YxR#NLgKZ+yDn@Pq6jB1V delta 12 TcmZ3>xR#NLgKHwwDn>B?6m1? 
diff --git a/onnx/backend/test/data/node/test_convtranspose_kernel_shape/model.onnx b/onnx/backend/test/data/node/test_convtranspose_kernel_shape/model.onnx index 4d328e9cdf2b47e26f5a0adac764b05673a34e8e..e0948814573b9b5132b2f623086d001d73bbaa62 100644 GIT binary patch delta 13 UcmeBT>SAKzVB5&V&dA6O01^QK@Bjb+ delta 13 UcmeBT>SAKz;M&N<&d4YR01_wx{{R30 diff --git a/onnx/backend/test/data/node/test_convtranspose_output_shape/model.onnx b/onnx/backend/test/data/node/test_convtranspose_output_shape/model.onnx index 8b9b86216a3e42b0ea9552f6518f3b283a8b8805..344a21a8c3c4fb86b54350f9a2450cd443d11ec3 100644 GIT binary patch delta 12 Tcmcb@c!iOPgKZ+yMMiD_7$E~I delta 12 Tcmcb@c!iOPgKHwwMMg0I7(@d) diff --git a/onnx/backend/test/data/node/test_convtranspose_pad/model.onnx b/onnx/backend/test/data/node/test_convtranspose_pad/model.onnx index b972e2653e20c1f338e781e970d91f93af23e38e..bb593e66f076af1002b2976e1b2017da44cc5ad5 100644 GIT binary patch delta 12 TcmX@hc$SfggKZ+yDMoGp7jgp@ delta 12 TcmX@hc$SfggKHwwDMm2>7nK7g diff --git a/onnx/backend/test/data/node/test_convtranspose_pads/model.onnx b/onnx/backend/test/data/node/test_convtranspose_pads/model.onnx index 1e7116447cb70685f3f58a884fe1b1162677a638..b17ff879e67a54657ea0227e24f80196f9646866 100644 GIT binary patch delta 12 TcmX@Xc!H6MgKZ+yQATb67WD%M delta 12 TcmX@Xc!H6MgKHwwQARNU7Z?K; diff --git a/onnx/backend/test/data/node/test_cos/model.onnx b/onnx/backend/test/data/node/test_cos/model.onnx index 9e1260acfac92ee79207aee84bab640f06c19183..5ec0d0c3247d06b1ca670c45f1a77f6985884e5e 100644 GIT binary patch delta 10 RcmYdHWa40+$P~xO4gd?60m%RW delta 10 RcmYdHWa8kO$P~vY1^^4t0p9=s diff --git a/onnx/backend/test/data/node/test_cos_example/model.onnx b/onnx/backend/test/data/node/test_cos_example/model.onnx index b74fcbe66e2cec92728b1c9d02e85d85b2bdddd9..6726ce0230ae08a60af3e7e6ad03b080ddbbf655 100644 GIT binary patch delta 10 Rcma!yWa40+$P~uN4gd=00kHr8 delta 10 Rcma!yWa8kO$P~sX1^^2n0mlFU diff --git 
a/onnx/backend/test/data/node/test_cosh/model.onnx b/onnx/backend/test/data/node/test_cosh/model.onnx index e74ac53653ee14bc7b84f676b035010e7670a4d5..68d88a42c038c4c48b7bf432d188e0196ed1bdc9 100644 GIT binary patch delta 10 RcmYdJX5wI($dtgy2>=Vr0nz{f delta 10 RcmYdJX5!$Q$dte+1^^5E0p$Py diff --git a/onnx/backend/test/data/node/test_cosh_example/model.onnx b/onnx/backend/test/data/node/test_cosh_example/model.onnx index 7dc7ab4dc5f74b841c9db0bd978921edc14a5837..db9eec4d05155d6b9d574783755525f7e0ca9cb2 100644 GIT binary patch delta 10 Rcma!!X5wI($P~fI2>=Tl0lEMH delta 10 Rcma!!X5!$Q$P~dS1^^380nGpa diff --git a/onnx/backend/test/data/node/test_deform_conv_with_mask_bias/model.onnx b/onnx/backend/test/data/node/test_deform_conv_with_mask_bias/model.onnx index a48939459b2258bbe04b252cc4f43eba13d7f441..3f5948c98a2284ff489fdaf87b2eed9e721602b2 100644 GIT binary patch delta 13 UcmdnSw2g_0gL5O3F(ace02b~7e*gdg delta 13 UcmdnSw2g_0gKHy`F(acG02cTHg8%>k diff --git a/onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/model.onnx b/onnx/backend/test/data/node/test_deform_conv_with_multiple_offset_groups/model.onnx index bb2b554d5b070f9843d73d262e5de136b134b857..2adbaa41881f32fb3f958393c9e0272c0626e66e 100644 GIT binary patch delta 13 UcmbQoG>?gigL5O3G$W%h02EFFHvj+t delta 13 UcmbQoG>?gigKHy`G$W%J02EjPI{*Lx diff --git a/onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/input_2.pb b/onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/input_2.pb index c0d4733202c..27697b35887 100644 --- a/onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/input_2.pb +++ b/onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/input_2.pb @@ -1 +1,2 @@ -*B x_zero_point \ No newline at end of file +*B +zero_point \ No newline at end of file diff --git a/onnx/backend/test/data/node/test_dequantizelinear_uint4/test_data_set_0/input_2.pb 
b/onnx/backend/test/data/node/test_dequantizelinear_uint4/test_data_set_0/input_2.pb index 118b7d9a7ee..8876e08c6db 100644 --- a/onnx/backend/test/data/node/test_dequantizelinear_uint4/test_data_set_0/input_2.pb +++ b/onnx/backend/test/data/node/test_dequantizelinear_uint4/test_data_set_0/input_2.pb @@ -1 +1,2 @@ -*B x_zero_point \ No newline at end of file +*B +zero_point \ No newline at end of file diff --git a/onnx/backend/test/data/node/test_det_2d/model.onnx b/onnx/backend/test/data/node/test_det_2d/model.onnx index b2f1c52802f34681b340255905c0392da7a83a9b..c1ceae61c0ec21e5497e99ce49cd2c5446d0d2fc 100644 GIT binary patch delta 10 RcmWFuVd7w$$P~!P4FC#{0jU50 delta 10 RcmWFuVdCJL$P~yZ1^^1O0k{AF diff --git a/onnx/backend/test/data/node/test_det_nd/model.onnx b/onnx/backend/test/data/node/test_det_nd/model.onnx index 26d4c871da4e274940c070bb8017756069beb1e1..07a153a1c58b26dca3ea9e1f61a521b52e1633f2 100644 GIT binary patch delta 10 Rcma!vVd7w$$P~%Q4FC&20l@$O delta 10 Rcma!vVdCJL$P~#a1^^3U0nh*d diff --git a/onnx/backend/test/data/node/test_dropout_default/model.onnx b/onnx/backend/test/data/node/test_dropout_default/model.onnx index be75e7dbb46ca7f0b1cc0984e644c2df07a32692..184e44851900156a0d8cb7b648db3f8ed5421857 100644 GIT binary patch delta 10 Rcmb=cW8z?+$W+b93jhu?0xAFi delta 10 Rcmb=cW8&bN$W+ZJ1^^CD0yY2u diff --git a/onnx/backend/test/data/node/test_dropout_default_mask/model.onnx b/onnx/backend/test/data/node/test_dropout_default_mask/model.onnx index f45cd7a608049b61d1ef3456d90a72d673b6fb54..ef7b9fa8e33239015cfe83fc611b5b1a15a9ebb9 100644 GIT binary patch delta 12 TcmZ3$xPXy~gMA{?Tt;316BPoI delta 12 TcmZ3$xPXy~gKHwwTt+bf6EFgv diff --git a/onnx/backend/test/data/node/test_dropout_default_mask_ratio/model.onnx b/onnx/backend/test/data/node/test_dropout_default_mask_ratio/model.onnx index 343482d6c83c816ce478e9c954c91a8567edd426..5b1f07074959e9289a84cfc771b1a8c7c1895437 100644 GIT binary patch delta 12 TcmdnSxQ&sCgMA{?W=38B6*~gd delta 12 TcmdnSxQ&sCgKHwwW=1gp6;=Y^ 
diff --git a/onnx/backend/test/data/node/test_dropout_default_ratio/model.onnx b/onnx/backend/test/data/node/test_dropout_default_ratio/model.onnx index 3f079ba46215fe1d877fe3c777b1f3906c795475..8deb0febfd3d5a3837cd69b4a7ecf3bb1be40b81 100644 GIT binary patch delta 12 TcmbQjIE9gkgMA{?L`Gf!5$OVM delta 12 TcmbQjIE9gkgKHwwL`E?H5(ENz diff --git a/onnx/backend/test/data/node/test_elu/model.onnx b/onnx/backend/test/data/node/test_elu/model.onnx index dd1003b7c4c300cdcdce2d4233e024b32ed46891..141d895bba6c22541df3449a66f9d3e187b35f01 100644 GIT binary patch delta 10 RcmXRaV&Y()$du2>1^^8V0s8;| delta 10 RcmXRaV&dSM$du101^^940ulfK diff --git a/onnx/backend/test/data/node/test_elu_default/model.onnx b/onnx/backend/test/data/node/test_elu_default/model.onnx index 3a979710e0214ae8c3045826ccc67aff8aa8fe40..4007dc3368838f7645f474faf7e6b147defc1168 100644 GIT binary patch delta 10 Rcmd1IWa40+$dty&1^^630pI`t delta 10 Rcmd1IWa8kO$dtw?1^^6z0rvm^ diff --git a/onnx/backend/test/data/node/test_elu_example/model.onnx b/onnx/backend/test/data/node/test_elu_example/model.onnx index 99091bb51347fb399c2107824b451015151db6e8..9ea0c1df665fc0de368d7264b0d67026e350332b 100644 GIT binary patch delta 10 Rcmd1GV&Y()$dt~=1^^6P0pkDw delta 10 Rcmd1GV&dSM$dt|~1^^6}0r~&{ diff --git a/onnx/backend/test/data/node/test_eyelike_populate_off_main_diagonal/model.onnx b/onnx/backend/test/data/node/test_eyelike_populate_off_main_diagonal/model.onnx index 60a068b8e20c3d02aa9aa29c92ad3f84b38fa0c3..0b98de4160d929140a1f5c4a8905d1690eca012c 100644 GIT binary patch delta 12 TcmbQjIE9gkgJmMqL`F^k5z_)^ delta 12 TcmbQjIE9gkgKHwwL`E?H5(ENz diff --git a/onnx/backend/test/data/node/test_eyelike_with_dtype/model.onnx b/onnx/backend/test/data/node/test_eyelike_with_dtype/model.onnx index 8599bfdb16a67ae6ab8fb832dab0219f5b4257ee..ca799fb505d238c46936791742564f7b0df9b235 100644 GIT binary patch delta 10 Rcmb=bV&Y(#$W+eA2>=bn0v7-P delta 10 Rcmb=bV&dSM$W+cK1^^BA0xAFi diff --git 
a/onnx/backend/test/data/node/test_eyelike_without_dtype/model.onnx b/onnx/backend/test/data/node/test_eyelike_without_dtype/model.onnx index 584a3328605989a093883ae662e42c8a027cbbb3..6c7db0b132ad494e8cd819ace02277c087943364 100644 GIT binary patch delta 10 Rcmd1LXX0R)$dtp#2>=Y!0rmg@ delta 10 Rcmd1LXX4||u(V4ld-#>fZ&5V8V2 delta 12 TcmeBV>||u(;F`$P#wZ2=5b*+8 diff --git a/onnx/backend/test/data/node/test_gridsample/model.onnx b/onnx/backend/test/data/node/test_gridsample/model.onnx index 1d17074133eb32dda2f02b32b0ab7053d2cd82e8..334b8cca0ab2ae4675ba0a26123ba83e5f7a9546 100644 GIT binary patch delta 12 TcmaFJ_>hr_gL5L&Jw_1#8G!>{ delta 12 TcmaFJ_>hr_gKHwwJw`DA8Hod7 diff --git a/onnx/backend/test/data/node/test_gridsample_aligncorners_true/model.onnx b/onnx/backend/test/data/node/test_gridsample_aligncorners_true/model.onnx index 6def3c0df966c79be2c72f56a01e681693caeda2..40b0e8b6fab4feed92e827b4618d3a747c66747e 100644 GIT binary patch delta 12 Tcmcb~c$1NdgL5L&HAWEt7`X#R delta 12 Tcmcb~c$1NdgKHwwHAXQ27{LQc diff --git a/onnx/backend/test/data/node/test_gridsample_bicubic/model.onnx b/onnx/backend/test/data/node/test_gridsample_bicubic/model.onnx index 307d6519556d18aeccc5f870527bb2548f76454e..14cb848688146427246d0712b1f0b48300550856 100644 GIT binary patch delta 12 TcmdnNxPy_2gL5L&Rz?v36@UWc delta 12 TcmdnNxPy_2gKHwwRz@)Z6^H`n diff --git a/onnx/backend/test/data/node/test_gridsample_bicubic_align_corners_0_additional_1/model.onnx b/onnx/backend/test/data/node/test_gridsample_bicubic_align_corners_0_additional_1/model.onnx index cf230353d27e2afef8930dfb3bf24f560a544526..ec7876ecf275e9257b42cd67601fce658ec13078 100644 GIT binary patch delta 12 TcmaFO_?nT4gL5L&3q}zD8hZnL delta 12 TcmaFO_?nT4gKHww3q~;j8iNCW diff --git a/onnx/backend/test/data/node/test_gridsample_bicubic_align_corners_1_additional_1/model.onnx b/onnx/backend/test/data/node/test_gridsample_bicubic_align_corners_1_additional_1/model.onnx index 
a617350a9af440934aff8c9e59dc30551ce2b84a..19f37d045239bfaba7fff6243eb3dc0cd168fb9d 100644 GIT binary patch delta 12 TcmaFO_?nT4gL5L&3q}zD8hZnL delta 12 TcmaFO_?nT4gKHww3q~;j8iNCW diff --git a/onnx/backend/test/data/node/test_gridsample_bilinear/model.onnx b/onnx/backend/test/data/node/test_gridsample_bilinear/model.onnx index bb1b2ebd769ba8ba81f8cc6165235ef49e81a680..af5a0847336936466524592dbf2015e6a6cbe2c3 100644 GIT binary patch delta 12 TcmdnRxQmgAgL5L&c195Z6|w^9 delta 12 TcmdnRxQmgAgKHwwc1AG(6}kfK diff --git a/onnx/backend/test/data/node/test_gridsample_bilinear_align_corners_0_additional_1/model.onnx b/onnx/backend/test/data/node/test_gridsample_bilinear_align_corners_0_additional_1/model.onnx index 07ac23f070b6f472b8a219ecbd2b5a4e6931639f..188342495b8a2cd40b5842b95742d68de11e9983 100644 GIT binary patch delta 12 TcmaFM_?D50gL5L&D@G9j8m$9@ delta 12 TcmaFM_?D50gKHwwD@HK@8npw3 diff --git a/onnx/backend/test/data/node/test_gridsample_bilinear_align_corners_1_additional_1/model.onnx b/onnx/backend/test/data/node/test_gridsample_bilinear_align_corners_1_additional_1/model.onnx index c17ad25317b924f1975699f2faf20e80edbfd7af..e5686c3c37d5f5b1aee7a4c91a60ca6fb56ca52f 100644 GIT binary patch delta 12 TcmaFM_?D50gL5L&D@G9j8m$9@ delta 12 TcmaFM_?D50gKHwwD@HK@8npw3 diff --git a/onnx/backend/test/data/node/test_gridsample_border_padding/model.onnx b/onnx/backend/test/data/node/test_gridsample_border_padding/model.onnx index 3c548c90ff6c0e514e6466703f89ab734671d237..01fbcef9853af9a67d7f6b35aa1ee0b7e0401779 100644 GIT binary patch delta 12 TcmX@Xc!H6MgL5L&QAQB}7Z3vz delta 12 TcmX@Xc!H6MgKHwwQARNU7Z?K; diff --git a/onnx/backend/test/data/node/test_gridsample_nearest/model.onnx b/onnx/backend/test/data/node/test_gridsample_nearest/model.onnx index 294090833dbdd69f2cedebded868ee607d6eec66..bacb4086e0d019d64cacfaa85cff9c6515d16e02 100644 GIT binary patch delta 12 TcmdnRxQmgAgL5L&c195Z6|w^9 delta 12 TcmdnRxQmgAgKHwwc1AG(6}kfK diff --git 
a/onnx/backend/test/data/node/test_gridsample_nearest_align_corners_0_additional_1/model.onnx b/onnx/backend/test/data/node/test_gridsample_nearest_align_corners_0_additional_1/model.onnx index 245dcbfba8a9495f7f03b381390e69c5535d7783..2003d009b6d9a0b13367dbfe2792e61703262059 100644 GIT binary patch delta 12 TcmaFM_?D50gL5L&D@G9j8m$9@ delta 12 TcmaFM_?D50gKHwwD@HK@8npw3 diff --git a/onnx/backend/test/data/node/test_gridsample_nearest_align_corners_1_additional_1/model.onnx b/onnx/backend/test/data/node/test_gridsample_nearest_align_corners_1_additional_1/model.onnx index de22147ab3546b7cded516ae6d005bb9eda6bccf..2a105639f0d92e47f23fe34ae28c96136ec3dec5 100644 GIT binary patch delta 12 TcmaFM_?D50gL5L&D@G9j8m$9@ delta 12 TcmaFM_?D50gKHwwD@HK@8npw3 diff --git a/onnx/backend/test/data/node/test_gridsample_reflection_padding/model.onnx b/onnx/backend/test/data/node/test_gridsample_reflection_padding/model.onnx index 203f1434d3449e4d7f67478ce6f6503cb51ec988..9c7f5cf9ab9c67ec6ad5d5aca4fd2e3bef8601c4 100644 GIT binary patch delta 12 Tcmcb>c!80LgL5L&Sw;~67uW+U delta 12 Tcmcb>c!80LgKHwwSw=Ac7vKXf diff --git a/onnx/backend/test/data/node/test_gridsample_volumetric_bilinear_align_corners_0/model.onnx b/onnx/backend/test/data/node/test_gridsample_volumetric_bilinear_align_corners_0/model.onnx index 7f90268223a6c9897ad6c9c9c2070262690d53c3..02d940fb147d64c5d205fafc279b7cbcfa8aff75 100644 GIT binary patch delta 12 Tcmey)_??l7gL5L&7e)~P8>a)H delta 12 Tcmey)_??l7gKHww7e+Av8?OVS diff --git a/onnx/backend/test/data/node/test_gridsample_volumetric_bilinear_align_corners_1/model.onnx b/onnx/backend/test/data/node/test_gridsample_volumetric_bilinear_align_corners_1/model.onnx index e3781ffb3b6bc31b9173ab2f3d8cbdc5edb99506..af0b03cd424f5360f7d9fd4ba939eddb58bab0d2 100644 GIT binary patch delta 12 Tcmey)_??l7gL5L&7e)~P8>a)H delta 12 Tcmey)_??l7gKHww7e+Av8?OVS diff --git a/onnx/backend/test/data/node/test_gridsample_volumetric_nearest_align_corners_0/model.onnx 
b/onnx/backend/test/data/node/test_gridsample_volumetric_nearest_align_corners_0/model.onnx index 706252dc07350bfbf8708bfa0812572d6f5fe3e9..d87cce0017be68d7840b273556765b0d6c50f64a 100644 GIT binary patch delta 12 Tcmey)_??l7gL5L&7e)~P8>a)H delta 12 Tcmey)_??l7gKHww7e+Av8?OVS diff --git a/onnx/backend/test/data/node/test_gridsample_volumetric_nearest_align_corners_1/model.onnx b/onnx/backend/test/data/node/test_gridsample_volumetric_nearest_align_corners_1/model.onnx index 2eb750c678e7a06a3aa1a295831784dd8a972247..dd789c06a01c81b8cbb88d8d4bfaf4f07b985431 100644 GIT binary patch delta 12 Tcmey)_??l7gL5L&7e)~P8>a)H delta 12 Tcmey)_??l7gKHww7e+Av8?OVS diff --git a/onnx/backend/test/data/node/test_gridsample_zeros_padding/model.onnx b/onnx/backend/test/data/node/test_gridsample_zeros_padding/model.onnx index 7ed92ef5311a9825cbc09c9e8eac1610ae532c12..fb2b33369d93e5187a7770c211cabed731035977 100644 GIT binary patch delta 12 TcmX@cc#M&WgL5L&VMY-E7TyC5 delta 12 TcmX@cc#M&WgKHwwVMZ|k7UlyG diff --git a/onnx/backend/test/data/node/test_gru_batchwise/model.onnx b/onnx/backend/test/data/node/test_gru_batchwise/model.onnx index 6e8a50d56f382acfcf99fbadbcc85182b54e015b..8d50456cb680a3b4818655f242baad2ed5d72796 100644 GIT binary patch delta 12 TcmaFO_?nT4gMA{?3r0Qw8fgP` delta 12 TcmaFO_?nT4gKHww3q~;j8iNCW diff --git a/onnx/backend/test/data/node/test_gru_defaults/model.onnx b/onnx/backend/test/data/node/test_gru_defaults/model.onnx index 07689ef41373700a811dfb04e454685aaca239ec..36fcba152af486f12f6ebd4e9c51b81d9967f1c6 100644 GIT binary patch delta 12 TcmdnXxR;TMgMA{?E=E2873%`( delta 12 TcmdnXxR;TMgKHwwE=Dl`76k(J diff --git a/onnx/backend/test/data/node/test_gru_seq_length/model.onnx b/onnx/backend/test/data/node/test_gru_seq_length/model.onnx index 5f0369e753023d1b17c40b1f88f0a3afb2003128..2c9c2489c53a7ce8a68d4635cb511c72492d3e84 100644 GIT binary patch delta 12 Tcmcc4c%6}ngMA{?6-GV)7@%( diff --git a/onnx/backend/test/data/node/test_gru_with_initial_bias/model.onnx 
b/onnx/backend/test/data/node/test_gru_with_initial_bias/model.onnx index d87b797d815ab9be6247dac69304ff16bf72d24a..db0c7de9456cfbaa16020c3ac78393728d5a768a 100644 GIT binary patch delta 12 Tcmcb|c#n~ZgMA{?ZALx-86*Qu delta 12 Tcmcb|c#n~ZgKHwwZALKw89oD8 diff --git a/onnx/backend/test/data/node/test_hardsigmoid/model.onnx b/onnx/backend/test/data/node/test_hardsigmoid/model.onnx index 5070351948dc1f527798bead8ba2f2c23d5245cd..b330b5ce9145222c0a83de9c9a7affdd14f7ec6f 100644 GIT binary patch delta 12 TcmbQlIEj&ogLxuTKO-9e5tss9 delta 12 TcmbQlIEj&ogKHvFKcg4`5z+#5 diff --git a/onnx/backend/test/data/node/test_hardsigmoid_default/model.onnx b/onnx/backend/test/data/node/test_hardsigmoid_default/model.onnx index d0e57f315b158e44366592303a729a228746a469..2672c13398e2490e53d978b8ae6a468700d0e253 100644 GIT binary patch delta 10 Rcmb=dWa40+$W+G21^^AF0uTTI delta 10 Rcmb=dWa8kO$W+EC1^^A<0w(|f diff --git a/onnx/backend/test/data/node/test_hardsigmoid_example/model.onnx b/onnx/backend/test/data/node/test_hardsigmoid_example/model.onnx index ffe09d493db5ce1f65678e4a0b185b6a1945438c..4c1564a09efdd2b09f8b5d1daa684cfc03259ca5 100644 GIT binary patch delta 12 TcmeBT>|$i%V4ld-&d3G;5YPfe delta 12 TcmeBT>|$i%;F`$P&L{=|5efoa diff --git a/onnx/backend/test/data/node/test_hardswish/model.onnx b/onnx/backend/test/data/node/test_hardswish/model.onnx index 0ed0b32120822955a78d58ed1031acca80892ebd..f72506caa0fcd1cfbe9b6a2c0777800e0e828e02 100644 GIT binary patch delta 10 Rcmd1JW#V9;$dtv%2LKGd0r&s_ delta 10 Rcmd1JW#ZtP$dtt>1^^7$0s{a5 diff --git a/onnx/backend/test/data/node/test_hardswish_expanded/model.onnx b/onnx/backend/test/data/node/test_hardswish_expanded/model.onnx index abe72af6be28a474e841f295b16879274122d3a4..ecfe4f31ba5f246b384fd399288a14ffc9fd052e 100644 GIT binary patch delta 13 UcmeBT>SAKzVBg5Z&dA6I01^uU^Z)<= delta 13 UcmeBT>SAKz;M&N<&d4YR01_wx{{R30 diff --git a/onnx/backend/test/data/node/test_image_decoder_decode_jpeg2k_rgb/test_data_set_0/input_0.pb 
b/onnx/backend/test/data/node/test_image_decoder_decode_jpeg2k_rgb/test_data_set_0/input_0.pb index 5f4cb90f28a29a8bdea0b08b4c067fdb9f086b8b..66db10c971f7bc76eaf32994c299b2dd45aa8f93 100644 GIT binary patch delta 12 TcmaFM_m*$MeMW@ECzS=e delta 12 TcmaFM_m*$MeMX~=4|>@EC!Ynq diff --git a/onnx/backend/test/data/node/test_instancenorm_epsilon/model.onnx b/onnx/backend/test/data/node/test_instancenorm_epsilon/model.onnx index 9cf84c252d0601ce7ed48fa66d246c4420fef0cb..d6ac084c9754749da613a2d1f5f879246b843521 100644 GIT binary patch delta 12 TcmX@dc#e^YgLxv;X+|~x7jy#= delta 12 TcmX@dc#e^YgKHwwX+|*s7p?;+ diff --git a/onnx/backend/test/data/node/test_instancenorm_example/model.onnx b/onnx/backend/test/data/node/test_instancenorm_example/model.onnx index ae190ddd6546ff0099768725e7b2658fcb717f62..37a901970a2fc37397bebf60140a3ad4ef1aca92 100644 GIT binary patch delta 12 TcmdnZxSNrQgLxv;4n{Tr6`2Cq delta 12 TcmdnZxSNrQgKHww4n{Em71ILm diff --git a/onnx/backend/test/data/node/test_lppool_1d_default/model.onnx b/onnx/backend/test/data/node/test_lppool_1d_default/model.onnx index d3d882afbed24b11a6ed25a6b1c5f9d54afdded4..8734aa23c9c2056224aa04b1372b5a84efa0b398 100644 GIT binary patch delta 12 TcmZ3)xQLO7gJUAod`2Mv6H)@6 delta 12 TcmZ3)xQLO7gKHwwd`2+<6Ji3S diff --git a/onnx/backend/test/data/node/test_lppool_2d_default/model.onnx b/onnx/backend/test/data/node/test_lppool_2d_default/model.onnx index 0cd55f03c51f0c76abd53f18492a91e8a4b1bf8f..6d8e9eef7f9b7bee73a2566c335c09c01da37cc1 100644 GIT binary patch delta 12 TcmbQkIERsmgJUAoOhzF961)P8 delta 12 TcmbQkIERsmgKHwwOhz#P63haU diff --git a/onnx/backend/test/data/node/test_lppool_2d_dilations/model.onnx b/onnx/backend/test/data/node/test_lppool_2d_dilations/model.onnx index 7970480a7dff811fcdd20fb620f5f7772ae96d98..503ccff12b944fe818451eb8c24ecad9e65c3497 100644 GIT binary patch delta 12 TcmX@Yc!ZIOgJUAoK}I0}7Ni3N delta 12 TcmX@Yc!ZIOgKHwwK}InE7PJEj diff --git a/onnx/backend/test/data/node/test_lppool_2d_pads/model.onnx 
b/onnx/backend/test/data/node/test_lppool_2d_pads/model.onnx index 4d01b667e63fa2a8820678d468de55c3d4f26033..73121feab9ba97acd22c6a5c5d0c855d75e1edb1 100644 GIT binary patch delta 12 TcmZ3(xQ3C5gJUAoN=6|76ifoV delta 12 TcmZ3(xQ3C5gKHwwN=7jN6kGzr diff --git a/onnx/backend/test/data/node/test_lppool_2d_same_lower/model.onnx b/onnx/backend/test/data/node/test_lppool_2d_same_lower/model.onnx index 00afca88cc1c8b48d3f7114f56fbda1269cff5a6..dbc14bf1a9bde9686e242f418e2753973cb87974 100644 GIT binary patch delta 12 TcmdnRxQmgAgJUAoc19rp6{-T} delta 12 TcmdnRxQmgAgKHwwc1AG(6}kfK diff --git a/onnx/backend/test/data/node/test_lppool_2d_same_upper/model.onnx b/onnx/backend/test/data/node/test_lppool_2d_same_upper/model.onnx index 108b05ae157859e54026cf1534468808f1505b59..3216a2a29369b1114b7763de3a3cd7f78f8349e8 100644 GIT binary patch delta 12 TcmdnRxQmgAgJUAoc19rp6{-T} delta 12 TcmdnRxQmgAgKHwwc1AG(6}kfK diff --git a/onnx/backend/test/data/node/test_lppool_2d_strides/model.onnx b/onnx/backend/test/data/node/test_lppool_2d_strides/model.onnx index 0ddc7b602cb368be841c99924a600d3e42881e43..fa7064f299b99e6f0db6e37f26ae1fbe3ccaccb6 100644 GIT binary patch delta 12 TcmZ3-xQ>yDgJUAoYDOUd6n+B2 delta 12 TcmZ3-xQ>yDgKHwwYDO^t6pjMO diff --git a/onnx/backend/test/data/node/test_lppool_3d_default/model.onnx b/onnx/backend/test/data/node/test_lppool_3d_default/model.onnx index becb512e64df1e7848442bbb4592a5f0b821f0fe..e8e653d283820d0d6f4458563d7a253e31005d5c 100644 GIT binary patch delta 12 TcmZ3+xQvmBgJUAoVn!hV6Se}X delta 12 TcmZ3+xQvmBgKHwwVn#6l6UG9t diff --git a/onnx/backend/test/data/node/test_lstm_batchwise/model.onnx b/onnx/backend/test/data/node/test_lstm_batchwise/model.onnx index 29284301f7858d44374663886b815f2848047005..a10fa1840dfa9be61e685c2600ea5c416107ae1d 100644 GIT binary patch delta 12 TcmaFM_?D50gMA{?D@Hy58k+-p delta 12 TcmaFM_?D50gKHwwD@HK@8npw3 diff --git a/onnx/backend/test/data/node/test_lstm_defaults/model.onnx 
b/onnx/backend/test/data/node/test_lstm_defaults/model.onnx index a7bcc594141364ef42414183ef2a4d2f3d854874..7e6a4af6806a7fbfbbf131170abd09ba2edb6a2b 100644 GIT binary patch delta 12 TcmdnbxSx@UgMA{?9!5R@799fc delta 12 TcmdnbxSx@UgKHww9!4<$7B>R> diff --git a/onnx/backend/test/data/node/test_lstm_with_initial_bias/model.onnx b/onnx/backend/test/data/node/test_lstm_with_initial_bias/model.onnx index caba9ced475860dc6341faf354ee69f16c431138..1574652ffda8e583f527326a781efff5b7cf58ea 100644 GIT binary patch delta 12 TcmaFB_<)g#gMA{?T}D0t8CC;R delta 12 TcmaFB_<)g#gKHwwT}Ckg8E^w$ diff --git a/onnx/backend/test/data/node/test_lstm_with_peepholes/model.onnx b/onnx/backend/test/data/node/test_lstm_with_peepholes/model.onnx index 21a905342a2a958d60f1bcc69e7f241201449e33..c5e3f974c4f3ead374272fd35ea0f1dce7dbaa5d 100644 GIT binary patch delta 13 Ucmey#^plB+gMA}Y86zVf03I&`K>z>% delta 13 Ucmey#^plB+gKHyG86%?@03J*OOaK4? diff --git a/onnx/backend/test/data/node/test_maxpool_1d_default/model.onnx b/onnx/backend/test/data/node/test_maxpool_1d_default/model.onnx index dba3be8456f30c41afc1c1066144e029043926da..d72de35c8e71383a8ade56038ec16b43dfb45081 100644 GIT binary patch delta 12 TcmeBV>||u(V4uj;#>fKz5Y+-q delta 12 TcmeBV>||u(;F`$P#wZ2=5b*+8 diff --git a/onnx/backend/test/data/node/test_maxpool_2d_ceil/model.onnx b/onnx/backend/test/data/node/test_maxpool_2d_ceil/model.onnx index 4bb1e32b06df63667916f7c6d3603ec7ebfaa3df..7515f602fcb260a49ef9e4f7fe5085185ca10742 100644 GIT binary patch delta 12 TcmdnWxRsHKgMA{?CPp3r6(Iu9 delta 12 TcmdnWxRsHKgKHwwCPpy;6+Hso diff --git a/onnx/backend/test/data/node/test_maxpool_2d_ceil_output_size_reduce_by_one/model.onnx b/onnx/backend/test/data/node/test_maxpool_2d_ceil_output_size_reduce_by_one/model.onnx index c0b1842bfed08aff9cca2fedd053e3311a5db569..0c136bf91b9d39a970c6373cedc77b43c754f4e7 100644 GIT binary patch delta 12 TcmX@lc%G4ogMA{?8Acue7pnsv delta 12 TcmX@lc%G4ogKHww8AdSx7smrD diff --git 
a/onnx/backend/test/data/node/test_maxpool_2d_default/model.onnx b/onnx/backend/test/data/node/test_maxpool_2d_default/model.onnx index 64c4aff71717c19ff8be4a2ecfa5f45a0e9e89ec..820d4fdc4c40683c3cda9ec75f74038fc2a82e27 100644 GIT binary patch delta 12 TcmbQtIGK@&gMA{?1V$bJ5zhi@ delta 12 TcmbQtIGK@&gKHww1V%9c5$ghX diff --git a/onnx/backend/test/data/node/test_maxpool_2d_dilations/model.onnx b/onnx/backend/test/data/node/test_maxpool_2d_dilations/model.onnx index 3c304ff4aa4d6a4308cb70a8f09ac1f9f136f09e..19140d87016d1753d928ae42a9d850768f9978cb 100644 GIT binary patch delta 12 TcmdnPxQCI6gMA{?PDUO870?3Z delta 12 TcmdnPxQCI6gKHwwPDU{R73>1? diff --git a/onnx/backend/test/data/node/test_maxpool_2d_pads/model.onnx b/onnx/backend/test/data/node/test_maxpool_2d_pads/model.onnx index 98cbdbe0cc2b12572e3c739345fe09b1eb86785a..a917b6a9094a070d19cdf572c4b9f2acc708695f 100644 GIT binary patch delta 12 TcmZ3&xP+03gMA{?LPj0{6Lc!80LgMA{?Swc!80LgKHwwSw=Ac7vKXf diff --git a/onnx/backend/test/data/node/test_maxpool_2d_precomputed_strides/model.onnx b/onnx/backend/test/data/node/test_maxpool_2d_precomputed_strides/model.onnx index 3175fabc99f28ed67bd966825fd0820e1d553cc8..53d8d72a344fb85b0c253faf124afa9909f3687b 100644 GIT binary patch delta 12 TcmdnQxQUU8gMA{?dPW`q6xIUA delta 12 TcmdnQxQUU8gKHwwdPXq-6!HSp diff --git a/onnx/backend/test/data/node/test_maxpool_2d_same_lower/model.onnx b/onnx/backend/test/data/node/test_maxpool_2d_same_lower/model.onnx index d972e13bfab13e2a3cb899a3fca1c62b20a204a6..42cd27dcbe0ce774175bde3b2b0637e66a54b5e8 100644 GIT binary patch delta 12 TcmdnQxQUU8gMA{?dPW`q6xIUA delta 12 TcmdnQxQUU8gKHwwdPXq-6!HSp diff --git a/onnx/backend/test/data/node/test_maxpool_2d_same_upper/model.onnx b/onnx/backend/test/data/node/test_maxpool_2d_same_upper/model.onnx index 0a4514be9638b3a3fdb4d6991a171a573927640a..b282f64679da6f1e6d79f3e0a5ff81a1766c3a7e 100644 GIT binary patch delta 12 TcmdnQxQUU8gMA{?dPW`q6xIUA delta 12 TcmdnQxQUU8gKHwwdPXq-6!HSp diff --git 
a/onnx/backend/test/data/node/test_maxpool_2d_strides/model.onnx b/onnx/backend/test/data/node/test_maxpool_2d_strides/model.onnx index ad67b18f51aee676249d858915730113ba768042..10a2ac5f1dc179556de9ba2d80d3ad9c20369188 100644 GIT binary patch delta 12 TcmZ3+xQvmBgMA{?Vn!YS6RHBE delta 12 TcmZ3+xQvmBgKHwwVn#6l6UG9t diff --git a/onnx/backend/test/data/node/test_maxpool_2d_uint8/model.onnx b/onnx/backend/test/data/node/test_maxpool_2d_uint8/model.onnx index 8f4fe7855616f51287937749e47c45385aa351fa..f3e38809441b3cdd06ec82a3f13fee408465a33a 100644 GIT binary patch delta 12 TcmZ3=xRjBJgMA{?B1Rqn6OjU- delta 12 TcmZ3=xRjBJgKHwwB1SO)6RiTR diff --git a/onnx/backend/test/data/node/test_maxpool_3d_default/model.onnx b/onnx/backend/test/data/node/test_maxpool_3d_default/model.onnx index c4e27bb94eb4f6c21f4c4176fa36c7e97209f882..15d6e4c265c8bbe52e33e73d547d4fe293ca908a 100644 GIT binary patch delta 12 TcmbQoIFFHugMA{?Y(^dc65;}j delta 12 TcmbQoIFFHugKHwwY(_Bv68-|1 diff --git a/onnx/backend/test/data/node/test_maxpool_3d_dilations/model.onnx b/onnx/backend/test/data/node/test_maxpool_3d_dilations/model.onnx index f9e9844cb8e7ffcec1f88873fdff9fb5464e2187..7b2bb587f785df04b2bfad713d0eeba0fd9695fc 100644 GIT binary patch delta 12 TcmX@bc#4sUgMA{?aYh~h7cK)2 delta 12 TcmX@bc#4sUgKHwwaYiu!7fJ&h diff --git a/onnx/backend/test/data/node/test_maxpool_3d_dilations_use_ref_impl/model.onnx b/onnx/backend/test/data/node/test_maxpool_3d_dilations_use_ref_impl/model.onnx index 381c4957c39dd8bfd58ba9548e4c3f7f6a9f20ca..b1e4d1e0f6a791610ee619ed0d9d474554d618ba 100644 GIT binary patch delta 12 Tcmcc4c%6}ngMA{?6-FKa7;^(Q delta 12 Tcmcc4c%6}ngKHww6-F@t7>@%( diff --git a/onnx/backend/test/data/node/test_maxpool_3d_dilations_use_ref_impl_large/model.onnx b/onnx/backend/test/data/node/test_maxpool_3d_dilations_use_ref_impl_large/model.onnx index 9509ad6e4a96beb60baf5172a28b9d673b43747e..3f27feed75163ab8cc6d48d00f9e42ab9315caa5 100644 GIT binary patch delta 12 TcmaFQ_@0r8gMA{?8%7=g8p{KI delta 12 
TcmaFQ_@0r8gKHww8%8kz8s`Ix diff --git a/onnx/backend/test/data/node/test_maxpool_with_argmax_2d_precomputed_pads/model.onnx b/onnx/backend/test/data/node/test_maxpool_with_argmax_2d_precomputed_pads/model.onnx index 3abdc840c54bd0838009402a6770b5e0b995b423..352bd92939a8d9c644a4cbaf18ae292bbde022e2 100644 GIT binary patch delta 12 Tcmcb^c!!aRgMA{?O-3F781Mr{ delta 12 Tcmcb^c!!aRgKHwwO-3;Q84Lqb diff --git a/onnx/backend/test/data/node/test_maxpool_with_argmax_2d_precomputed_strides/model.onnx b/onnx/backend/test/data/node/test_maxpool_with_argmax_2d_precomputed_strides/model.onnx index a67af2fbf6bc1ac205f1101406c54c6ab1599ffd..2d1d01124b6abf6150fda339e372ef32873ce6de 100644 GIT binary patch delta 12 Tcmeyu_=S;)gMA{?M@Ak18%P6< delta 12 Tcmeyu_=S;)gKHwwM@BIK8)O5T diff --git a/onnx/backend/test/data/node/test_maxunpool_export_with_output_shape/model.onnx b/onnx/backend/test/data/node/test_maxunpool_export_with_output_shape/model.onnx index 04673fcc00ee06c392162e58d5102ae8c265450d..117892ddba7a946deae906357a5f298058d033b9 100644 GIT binary patch delta 13 UcmZo;YGY#JVB5&V%*ezW^8f$< diff --git a/onnx/backend/test/data/node/test_maxunpool_export_without_output_shape/model.onnx b/onnx/backend/test/data/node/test_maxunpool_export_without_output_shape/model.onnx index 38cc84cc23c88dfd4505340d5280bbc0cb56b7b7..afc56ebff14c26d79c22f79ea1a222fe54d78351 100644 GIT binary patch delta 12 Tcmcc5c%PApgKZ+y9Y$^d88ib- delta 12 Tcmcc5c%PApgKHww9Y!$#8CL^a diff --git a/onnx/backend/test/data/node/test_mish/model.onnx b/onnx/backend/test/data/node/test_mish/model.onnx index 9b8fb3a8fb207712332372e5e7f9beae182a59ec..a2218ac41b3ed368a1caefef83a198e80e7821d7 100644 GIT binary patch delta 10 RcmWFyW#ZtN$P~mV1ON)S0kr@C delta 10 RcmWFyW#ZtP$P~mV1^^1k0lNSI diff --git a/onnx/backend/test/data/node/test_mish_expanded/model.onnx b/onnx/backend/test/data/node/test_mish_expanded/model.onnx index 465033dec4ed34fba3614771f04572dfd8ed4edd..8e8f56c194440a9ab7d64712843283081aabeaf5 100644 GIT binary patch 
delta 13 UcmdnWw3Ug8gJUC;5hJ4z02ayudH?_b delta 13 UcmdnWw3Ug8gKHy`5hJ4*02bT=fB*mh diff --git a/onnx/backend/test/data/node/test_nllloss_NC/model.onnx b/onnx/backend/test/data/node/test_nllloss_NC/model.onnx index 0d5f13841d900025ff7772d0a42161957f85436a..53aec2ac36157accfd156b10efa654117ed7eba3 100644 GIT binary patch delta 12 TcmdnWxRsHKgMA{?CPrQW6(R!B delta 12 TcmdnWxRsHKgKHwwCPpy;6+Hso diff --git a/onnx/backend/test/data/node/test_nllloss_NC_expanded/model.onnx b/onnx/backend/test/data/node/test_nllloss_NC_expanded/model.onnx index 0a13d6ffcbcd41b46fbd275cd3ff9a18ca005aba..fc00ac93ebed3763e7b06b5121d36d569f1f7d09 100644 GIT binary patch delta 13 UcmX@leV&_%gMA~@8CFJK032uoSO5S3 delta 13 UcmX@leV&_%gKHzx8CFIy033z`WB>pF diff --git a/onnx/backend/test/data/node/test_nllloss_NCd1/model.onnx b/onnx/backend/test/data/node/test_nllloss_NCd1/model.onnx index 189dc6c3c00f4d1016cd76be56c94951b872ec74..470693beb696843a28f8b7c71831803bf965bc30 100644 GIT binary patch delta 12 TcmdnZxSNrQgMA{?4n|%86}ST9 delta 12 TcmdnZxSNrQgKHww4n{Em71ILm diff --git a/onnx/backend/test/data/node/test_nllloss_NCd1_expanded/model.onnx b/onnx/backend/test/data/node/test_nllloss_NCd1_expanded/model.onnx index d63471102cdfca75be84ed97c8096988e34b3803..20d2ead61dde197de53cde9e8133bee634ba413f 100644 GIT binary patch delta 13 UcmZ3+yNs8KgMA~@Vm3xz02mtr;s5{u delta 13 UcmZ3+yNs8KgKHzxVm3xG02ny}?f?J) diff --git a/onnx/backend/test/data/node/test_nllloss_NCd1_ii/model.onnx b/onnx/backend/test/data/node/test_nllloss_NCd1_ii/model.onnx index b823ccbb2fbb19ed9ad6bd66ff6378ca2425568b..c855c5a81b352a16f9394e573c4528de269dedb2 100644 GIT binary patch delta 12 Tcmcc2c$txjgMA{?1x8)~7!U(1 delta 12 Tcmcc2c$txjgKHww1x7Id7%Kxe diff --git a/onnx/backend/test/data/node/test_nllloss_NCd1_ii_expanded/model.onnx b/onnx/backend/test/data/node/test_nllloss_NCd1_ii_expanded/model.onnx index c75e4ea1f7880b050bda228674b1d1ce6b28f9a4..34ee1d47465da7445c6f0141b02e42abd7a4503a 100644 GIT binary patch delta 13 
Ucmdn5x?h!vgMA~@9wA0v038bhX#fBK delta 13 Ucmdn5x?h!vgKHzx9wA0C039gSto&VBg5Z!^p@B01}o00ssI2 delta 13 UcmeBY>Sto&;M&N1XIsgCw diff --git a/onnx/backend/test/data/node/test_nllloss_NCd1d2/model.onnx b/onnx/backend/test/data/node/test_nllloss_NCd1d2/model.onnx index b91f1dfac1a7929a3d75b848348ca519592d3786..76f4cfaa31e02f60c1f73a4e4150678c582808ae 100644 GIT binary patch delta 12 Tcmcb}c#)BbgMA{?IYwRp7v2LU delta 12 Tcmcb}c#)BbgKHwwIYu!67x@D* diff --git a/onnx/backend/test/data/node/test_nllloss_NCd1d2_expanded/model.onnx b/onnx/backend/test/data/node/test_nllloss_NCd1d2_expanded/model.onnx index e07381d064d3204418c3a3928e5e890487260280..72777a5c1f134b121dbd97d2a8eebbfd81c4b28f 100644 GIT binary patch delta 13 UcmZ3@vzmvAgMA~D1{)(U02Yw~ZvX%Q delta 13 UcmZ3@vzmvAgKHy`1{GZ?gMA{?XGUHC8+!wk delta 12 Tcmeyy_>GZ?gKHwwXGSpq8Qi~s-t delta 13 Ucmcc1f0v($gKHzxEp|pR03K`um;e9( diff --git a/onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight/model.onnx b/onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight/model.onnx index 1fbc64e773b0582848b37ef873b8d4818b092365..afeef04cc45d2c66964eca8f8740727e4e0aea72 100644 GIT binary patch delta 12 Tcmey(_?wZ5gMA{?4@O=98~6jG delta 12 Tcmey(_?wZ5gKHww4@NNn91{bt diff --git a/onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_expanded/model.onnx b/onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_expanded/model.onnx index 5b9611af54a29102409a3cbf8041fb73cf7876ad..bb187cea67ddb0490bb8a1052e7e378724290e61 100644 GIT binary patch delta 13 UcmZ21xLA;hgMA~@0uDxA02n0#;{X5v delta 13 UcmZ21xLA;hgKHzx0uDwo02o68?*IS* diff --git a/onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_mean/model.onnx b/onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_mean/model.onnx index 5e928bfc0e5ebbd080d83174e56bf03e6ae8293f..19ae33e1f2ebe90c76fc5402063b21547a389289 100644 GIT binary patch delta 12 Tcmeyz_>Yl^gMA{?Z$@4K976-F delta 12 Tcmeyz_>Yl^gKHwwZ$>cy99{#s diff --git 
a/onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_mean_expanded/model.onnx b/onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_mean_expanded/model.onnx index d8bd66f76a2a8f99e6e1695e275e19a54127bb3f..50f66085ace2d73e9bc549e24d6b21c0ce6edfea 100644 GIT binary patch delta 13 Ucmew(`A3q8gMA~@PaZ~I03w|P0ssI2 delta 13 Ucmew(`A3q8gKHzxPaZ}w03y2t4gdfE diff --git a/onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_sum/model.onnx b/onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_sum/model.onnx index 50d52ec64b82f8eb7423c72d598299f14cae9d3a..fcc48702c96a24148f7c383057cf986656172741 100644 GIT binary patch delta 12 Tcmeyv_=k~+gMA{?Pext<91#Pi delta 12 Tcmeyv_=k~+gKHwwPew5S94rH} diff --git a/onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_sum_expanded/model.onnx b/onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_sum_expanded/model.onnx index 4dd5a25b1fc942e853678cd2933a7cb4964b1a86..06e5b65e6d0fe75c3989cc4c343af6c1c41eec5d 100644 GIT binary patch delta 13 UcmZ1_x=NIZgMA~@axO+*02xOE0ssI2 delta 13 UcmZ1_x=NIZgKHzxaxO+O02yTi4gdfE diff --git a/onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_sum_ii/model.onnx b/onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_sum_ii/model.onnx index 5fb3c75bfe3ddc47fbc09b1c12e2a8475512feff..30e926e943b9c01ae6c367e856b2d10de2ece6c6 100644 GIT binary patch delta 13 UcmbQjG=+(YgMA~DAR{9$023kt5dZ)H delta 13 UcmbQjG=+(YgKHy`AS0s~024q09RL6T diff --git a/onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_sum_ii_expanded/model.onnx b/onnx/backend/test/data/node/test_nllloss_NCd1d2_with_weight_reduction_sum_ii_expanded/model.onnx index 31c93d17e97ac8ebb9fe6689fc45351e3ea5ab77..acc84e0bef14f9cabe6742673c2cfa5f399307c7 100644 GIT binary patch delta 13 Ucmcbodry~%gMA~@Z81h(03j;_+5i9m delta 13 Ucmcbodry~%gKHzxZ81hM03k^O<^TWy diff --git 
a/onnx/backend/test/data/node/test_nllloss_NCd1d2d3_none_no_weight_negative_ii/model.onnx b/onnx/backend/test/data/node/test_nllloss_NCd1d2d3_none_no_weight_negative_ii/model.onnx index f7f4ff93df418c3f262c3b379c35429a7d49e53e..f1022a4bd9f0853499298c224996bded53ff4644 100644 GIT binary patch delta 13 UcmbQiG=qtWgMA~DC?g{;027h|9RL6T delta 13 UcmbQiG=qtWgKHy`C?lg7028nRDF6Tf diff --git a/onnx/backend/test/data/node/test_nllloss_NCd1d2d3_none_no_weight_negative_ii_expanded/model.onnx b/onnx/backend/test/data/node/test_nllloss_NCd1d2d3_none_no_weight_negative_ii_expanded/model.onnx index 0416417bfd779d02725b32999377b2defe71968b..4f236df7a8c08ed04536697ba794c77e71c2fc07 100644 GIT binary patch delta 13 UcmZqEZqsJsVBg5pEXv3W02f>W%>V!Z delta 13 UcmZqEZqsJs;M&O4EXpVb02g`!*#H0l diff --git a/onnx/backend/test/data/node/test_nllloss_NCd1d2d3_sum_weight_high_ii/model.onnx b/onnx/backend/test/data/node/test_nllloss_NCd1d2d3_sum_weight_high_ii/model.onnx index eebc8c5b7f1dc1cdcf222c1bbaca5439402e3083..8bb7c542065fb19f51483d98419280f0ca34ec00 100644 GIT binary patch delta 12 Tcmeyv_=k~+gMA{?Pext<91#Pi delta 12 Tcmeyv_=k~+gKHwwPew5S94rH} diff --git a/onnx/backend/test/data/node/test_nllloss_NCd1d2d3_sum_weight_high_ii_expanded/model.onnx b/onnx/backend/test/data/node/test_nllloss_NCd1d2d3_sum_weight_high_ii_expanded/model.onnx index 4d8c1c76e33d241ed7b3a5954a60c687ba99c11c..ea7d7177f20dccb464e53f16d290d3c80cb5f13e 100644 GIT binary patch delta 13 UcmX@AbySOqgMA~DlPDuE030d<2mk;8 delta 13 UcmX@AbySOqgKHy`lPIGY031jI6aWAK diff --git a/onnx/backend/test/data/node/test_nllloss_NCd1d2d3d4d5_mean_weight/model.onnx b/onnx/backend/test/data/node/test_nllloss_NCd1d2d3d4d5_mean_weight/model.onnx index 178faf552df192ddd5ecdf249c28aab7fa6f1011..248908917d760c75f8dce7842c8eac18aeece645 100644 GIT binary patch delta 13 UcmeBW>Sbc$VBg5Z#mLAD01{pS`~Uy| delta 13 UcmeBW>Sbc$;M&N<#mFcI01|ux2mk;8 diff --git a/onnx/backend/test/data/node/test_nllloss_NCd1d2d3d4d5_mean_weight_expanded/model.onnx 
b/onnx/backend/test/data/node/test_nllloss_NCd1d2d3d4d5_mean_weight_expanded/model.onnx index ede9e00357a258b441016459e628f51ffeee48f9..db7aea4c097c76e7c18a847a0349231a91d20de0 100644 GIT binary patch delta 13 UcmbOtF-3xjgMA~DAP*xi02LSAKzVBg5Z&dA6M01^rT^8f$< delta 13 UcmeBT>SAKz;M&N<&d4YR01_wx{{R30 diff --git a/onnx/backend/test/data/node/test_nllloss_NCd1d2d3d4d5_none_no_weight_expanded/model.onnx b/onnx/backend/test/data/node/test_nllloss_NCd1d2d3d4d5_none_no_weight_expanded/model.onnx index 89543e5650ca6677d8252f938d1847fa272c1978..e919d1d37c2ddcc08efee66962054477fbdd2057 100644 GIT binary patch delta 13 UcmdnSzm1=XgMA~@W_Cti02&Gd7XSbN delta 13 UcmdnSzm1=XgKHzxW_Cs~02(L*BLDyZ diff --git a/onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/input_2.pb b/onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/input_2.pb index 012db71244298f72c48ca400874a2a62a477353b..a6e11c419bfcc419d1ad6e5132b0affc3eb8b524 100644 GIT binary patch literal 19 acmd;J6cE&6WN_lDN-fHdFUZf#D**r?f&}XT literal 21 ccmd;J6cE&6WN_lCjIT;9%8xI|&&(?U04t#d#Q*>R diff --git a/onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/input_2.pb b/onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/input_2.pb index b7eca6fbcfed5524285075d7d03e894d25c122f9..8749ea311c59cb722a6f7bb960c2fbab886d95cb 100644 GIT binary patch literal 19 acmd;J6cE;8WN_lDN-fHdFUZf#D**r?q6F;# literal 21 ccmd;J6cE;8WN_lCjIT;9%8xI|&&(?U04v1>#{d8T diff --git a/onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/input_2.pb b/onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/input_2.pb index 7a0bc58621b..9fe0fc5b846 100644 --- a/onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/input_2.pb +++ b/onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/input_2.pb @@ -1 +1,2 @@ -*B y_zero_point \ No newline at end of file +*B +zero_point \ No newline at end of file diff --git 
a/onnx/backend/test/data/node/test_quantizelinear_uint4/test_data_set_0/input_2.pb b/onnx/backend/test/data/node/test_quantizelinear_uint4/test_data_set_0/input_2.pb index b0fdf1565af..412ac47a2c0 100644 --- a/onnx/backend/test/data/node/test_quantizelinear_uint4/test_data_set_0/input_2.pb +++ b/onnx/backend/test/data/node/test_quantizelinear_uint4/test_data_set_0/input_2.pb @@ -1 +1,2 @@ -*B y_zero_point \ No newline at end of file +*B +zero_point \ No newline at end of file diff --git a/onnx/backend/test/data/node/test_rnn_seq_length/model.onnx b/onnx/backend/test/data/node/test_rnn_seq_length/model.onnx index 7a025009f9be2f6e8fad09bc2f79d90922936cb9..e8273e546cd9bca066cee16606372e56a1c65a86 100644 GIT binary patch delta 12 Tcmcc4c%6}ngMA{?6-GV)7@%( diff --git a/onnx/backend/test/data/node/test_roialign_aligned_false/model.onnx b/onnx/backend/test/data/node/test_roialign_aligned_false/model.onnx index cd9dfe7c2ded0f6592c06af851f18f13e7aa5076..36f9ea44fdfb45c005a23bdb9659ba9954b5ee24 100644 GIT binary patch delta 13 UcmaFB^ni(pgJUC8EF+@;02_P)`2YX_ delta 13 UcmaFB^ni(pgKHyGEF+^B02`140RR91 diff --git a/onnx/backend/test/data/node/test_roialign_aligned_true/model.onnx b/onnx/backend/test/data/node/test_roialign_aligned_true/model.onnx index eb22ab1cbac75997af1c2a75c30327b70b5858ff..0626de8e12c7ef9e3ae7f0dc6dc91498f0a0e36e 100644 GIT binary patch delta 13 Ucmcb?bc2bBgJUC8C?lf)02-VE;Q#;t delta 13 Ucmcb?bc2bBgKHyGC?lg702;6Y=>Px# diff --git a/onnx/backend/test/data/node/test_roialign_mode_max/model.onnx b/onnx/backend/test/data/node/test_roialign_mode_max/model.onnx index c27207af408f59bc302c445ed1ee0c5a66fdae8b..c49bcbcebded3cc87c10cf39ea8538d269a6ab95 100644 GIT binary patch delta 13 UcmaFO^qPr@gJUC81|y>Y035Ib8UO$Q delta 13 UcmaFO^qPr@gKHyG1|y>w035^vA^-pY diff --git a/onnx/backend/test/data/node/test_round/model.onnx b/onnx/backend/test/data/node/test_round/model.onnx index 385814c703d6f5038164b5f56746dd0480a768ce..5551b49085dc0d914a950f0864b3e675cdb40c98 100644 GIT 
binary patch delta 10 RcmWFyW#V9)$P~oL4FC$I0jvN3 delta 10 RcmWFyW#ZtP$P~mV1^^1k0lNSI diff --git a/onnx/backend/test/data/node/test_selu/model.onnx b/onnx/backend/test/data/node/test_selu/model.onnx index 364ae8436137b1b20f4f6a7fdd420be0e7fef4e2..969a340a7416704eedb0ad9418aea36cb05b5bd8 100644 GIT binary patch delta 12 TcmZo=Y-MEPV4ld-#K;B!5K{s+ delta 12 TcmZo=Y-MEP;F`$P#3%*;5RC#& diff --git a/onnx/backend/test/data/node/test_selu_default/model.onnx b/onnx/backend/test/data/node/test_selu_default/model.onnx index 32f7c62f4cd4b325d4b8f6b303ed203e5d1b428a..72e2bd3f48a8aeca1c4bbff00c7bec0214b07721 100644 GIT binary patch delta 10 Rcmd1KX5wI;$dtjz1^^6l0p_@}gMA{?BSt;|8Pfw| delta 12 TcmaFL_>_@}gKHwwBStX*8SMjY diff --git a/onnx/backend/test/data/node/test_sin/model.onnx b/onnx/backend/test/data/node/test_sin/model.onnx index 23fdf09faf51fbc8b0370375d71673f398f112e2..724fc74d610f215ab69815bf04283d976a682021 100644 GIT binary patch delta 10 RcmYdHWa40+$P~xO4gd?60m%RW delta 10 RcmYdHWa8kO$P~vY1^^4t0p9=s diff --git a/onnx/backend/test/data/node/test_sin_example/model.onnx b/onnx/backend/test/data/node/test_sin_example/model.onnx index a2678a541819ad96ec621aaa615e73bc62c3ff24..abdb5ae6e3e107894e337b530de9da824c0d8dd6 100644 GIT binary patch delta 10 Rcma!yWa40+$P~uN4gd=00kHr8 delta 10 Rcma!yWa8kO$P~sX1^^2n0mlFU diff --git a/onnx/backend/test/data/node/test_sinh/model.onnx b/onnx/backend/test/data/node/test_sinh/model.onnx index 54cb1d7cdc3f1e60ff9c7e49fedc321208cdf6b2..496db2d0821322653865ca6d91ea3b8322b032fc 100644 GIT binary patch delta 10 RcmYdJX5wI($dtgy2>=Vr0nz{f delta 10 RcmYdJX5!$Q$dte+1^^5E0p$Py diff --git a/onnx/backend/test/data/node/test_sinh_example/model.onnx b/onnx/backend/test/data/node/test_sinh_example/model.onnx index b47d538826c0bf0eba5bcb6dfdba52e120031691..640cf0a01e85561d369c410c96cec3c82694dc1e 100644 GIT binary patch delta 10 Rcma!!X5wI($P~fI2>=Tl0lEMH delta 10 Rcma!!X5!$Q$P~dS1^^380nGpa diff --git a/onnx/backend/test/data/node/test_softplus/model.onnx 
b/onnx/backend/test/data/node/test_softplus/model.onnx index 4c2f549a2589324e9aaf2ee413715a988fe29c0a..8134a2da809644fa32a6da511605cbf706b97793 100644 GIT binary patch delta 10 Rcmd1KX5wI;$dtjz2mlOi0pS1u delta 10 Rcmd1KX5!$Q$dth-1^^7K0sQ~~ diff --git a/onnx/backend/test/data/node/test_softplus_example/model.onnx b/onnx/backend/test/data/node/test_softplus_example/model.onnx index ce4dbff63485c97c9e7f0a0b8d5525d697b96ce2..71e49dca20460145267b61fcdf1be7259255d382 100644 GIT binary patch delta 10 RcmYdJX5wI;$dtgy2mlMc0m%RW delta 10 RcmYdJX5!$Q$dte+1^^5E0p$Py diff --git a/onnx/backend/test/data/node/test_softsign/model.onnx b/onnx/backend/test/data/node/test_softsign/model.onnx index 08a665a020bab33ad504efcf389c87677f9900cb..8da75bcf950f6bdf845edc2b4e0679b6fb6a9c09 100644 GIT binary patch delta 10 Rcmd1KX5wI;$dtjz2mlOi0pS1u delta 10 Rcmd1KX5!$Q$dth-1^^7K0sQ~~ diff --git a/onnx/backend/test/data/node/test_softsign_example/model.onnx b/onnx/backend/test/data/node/test_softsign_example/model.onnx index 08202cbeeb32cb3af899c1a0b4062dbbef573c7b..215aa383289b3040852e9cd2f1983fe8c0ed628b 100644 GIT binary patch delta 10 RcmYdJX5wI;$dtgy2mlMc0m%RW delta 10 RcmYdJX5!$Q$dte+1^^5E0p$Py diff --git a/onnx/backend/test/data/node/test_tan/model.onnx b/onnx/backend/test/data/node/test_tan/model.onnx index 3d0edfc0314a9b50d6f634f5cf24c2cb323fb307..33166fd26cea6cde9e00b0cea49d36ab997cb0eb 100644 GIT binary patch delta 10 RcmYdHWa40+$P~xO4gd?60m%RW delta 10 RcmYdHWa8kO$P~vY1^^4t0p9=s diff --git a/onnx/backend/test/data/node/test_tan_example/model.onnx b/onnx/backend/test/data/node/test_tan_example/model.onnx index f03792d990bc42d9ff6fea70e179178924058662..36bb4a7a285e4007089de23b0254499c35cde5e7 100644 GIT binary patch delta 10 Rcma!yWa40+$P~uN4gd=00kHr8 delta 10 Rcma!yWa8kO$P~sX1^^2n0mlFU diff --git a/onnx/backend/test/data/node/test_thresholdedrelu/model.onnx b/onnx/backend/test/data/node/test_thresholdedrelu/model.onnx index 
32d9a52c9b900fad6e183b74017ef9d8e02f21b5..2e7eaf545e20be1a87402f545e0a2efbad724326 100644 GIT binary patch delta 12 TcmeBT>|$i%V4cX+&d3D-5a0q! delta 12 TcmeBT>|$i%;F`$P&L{=|5efoa diff --git a/onnx/backend/test/data/node/test_thresholdedrelu_default/model.onnx b/onnx/backend/test/data/node/test_thresholdedrelu_default/model.onnx index bc486141ff9e6352f0813c4b31252d6ba6ba3572..73ecc6235facf6a70951d7f70042cee6e9bfb414 100644 GIT binary patch delta 12 TcmZo5Gevd diff --git a/onnx/backend/test/data/node/test_thresholdedrelu_example/model.onnx b/onnx/backend/test/data/node/test_thresholdedrelu_example/model.onnx index 9745441bd2e78dbce10221b5d156d8852d15800b..3a4df8b25211598dc122f60d1485900afa44fa79 100644 GIT binary patch delta 12 TcmZo-Y+_{MV4cWR&&UM;5Eue8 delta 12 TcmZo-Y+_{M;F`!(&nN}}5JCb( diff --git a/onnx/backend/test/data/node/test_training_dropout/model.onnx b/onnx/backend/test/data/node/test_training_dropout/model.onnx index 2fb3db0fa68e1f0e5ea09f76cdd8df3ceba49459..2666f9f7eeb306d2c96c8977179c17b7eab36b53 100644 GIT binary patch delta 12 TcmZ3$xPXy~gMA{?Tt;316BPoI delta 12 TcmZ3$xPXy~gKHwwTt+bf6EFgv diff --git a/onnx/backend/test/data/node/test_training_dropout_default/model.onnx b/onnx/backend/test/data/node/test_training_dropout_default/model.onnx index 2a02f262702784930c124dd828a66fb7c527d76f..3dd1fade7c5ebe18357028ff825c76c8ba8d5208 100644 GIT binary patch delta 12 TcmZ3%xPp<1gMA{?Qbt|?6Ws!; delta 12 TcmZ3%xPp<1gKHwwQbsWV6ZitQ diff --git a/onnx/backend/test/data/node/test_training_dropout_default_mask/model.onnx b/onnx/backend/test/data/node/test_training_dropout_default_mask/model.onnx index 73f140c1494f7fb4b4c7170f29dea5a9c12a8f33..f522d20666f4910a0c195943c26c166eef16a8bc 100644 GIT binary patch delta 12 TcmX@fc#@HcgMA{?F-Beh7Zw8z delta 12 TcmX@fc#@HcgKHwwF-9=}7cm1F diff --git a/onnx/backend/test/data/node/test_training_dropout_mask/model.onnx b/onnx/backend/test/data/node/test_training_dropout_mask/model.onnx index 
7a2cf92a4961932d4786b57e21f163090e7a14b0..a6ea42104b340b9592a9583d328888f572720cb3 100644 GIT binary patch delta 12 TcmX@ec#x5agMA{?K1N;u7ES{7 delta 12 TcmX@ec#x5agKHwwK1MMB7HI) and produces one output data (Tensor) of the same shape, +where $Swish(x) = x * sigmoid(beta * x)$. +)DOC"; + +ONNX_OPERATOR_SET_SCHEMA( + Swish, + 22, + OpSchema() + .SetDoc(Swish_ver22_doc) + .Input(0, "X", "Input tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable) + .Output(0, "Y", "Output tensor", "T", OpSchema::Single, true, 1, OpSchema::Differentiable) + .TypeConstraint( + "T", + {"tensor(float16)", "tensor(float)", "tensor(bfloat16)"}, + "Constrain input and output types to float tensors.") + .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput) + .FunctionBody( + R"ONNX( + { + Alpha = Constant () + AlphaCast = CastLike (Alpha, X) + AlphaMulX = Mul (AlphaCast, X) + SigmoidAlphaMulX = Sigmoid(AlphaMulX) + Y = Mul (X, SigmoidAlphaMulX) + } + )ONNX", + 22)); + static const char* Exp_ver13_doc = R"DOC( Calculates the exponential of the given input tensor, element-wise. 
)DOC"; diff --git a/onnx/defs/operator_sets.h b/onnx/defs/operator_sets.h index ad2791524e7..5574512fb16 100644 --- a/onnx/defs/operator_sets.h +++ b/onnx/defs/operator_sets.h @@ -1234,6 +1234,7 @@ class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(Onnx, 22, RNN); class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(Onnx, 22, GRU); class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(Onnx, 22, LSTM); class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(Onnx, 22, GridSample); +class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(Onnx, 22, Swish); // Iterate over schema from ai.onnx version 22 class OpSet_Onnx_ver22 { @@ -1287,6 +1288,7 @@ class OpSet_Onnx_ver22 { fn(GetOpSchema()); fn(GetOpSchema()); fn(GetOpSchema()); + fn(GetOpSchema()); } }; diff --git a/onnx/test/version_converter/automatic_upgrade_test.py b/onnx/test/version_converter/automatic_upgrade_test.py index 10627b8c323..1b2e95913e8 100644 --- a/onnx/test/version_converter/automatic_upgrade_test.py +++ b/onnx/test/version_converter/automatic_upgrade_test.py @@ -1301,6 +1301,9 @@ def test_Sum(self) -> None: attrs={"consumed_inputs": [0]}, ) + def test_Swish(self) -> None: + self._test_op_upgrade("Swish", 22) + def test_Tanh(self) -> None: self._test_op_upgrade("Tanh", 1, attrs={"consumed_inputs": [0]})