From 2b770ab5174b9551e4e0e3b2912a4246df97c2eb Mon Sep 17 00:00:00 2001 From: "G. Ramalingam" Date: Fri, 22 Sep 2023 16:56:22 -0700 Subject: [PATCH] Clarify reduction behavior for an empty set of values (#5568) Clarify the behavior of the reduction-ops when reducing an empty set of values, by updating the test-cases and documentation. It is useful in various edge-cases. For example, ReduceProd should return 1 for an empty tensor, and ReduceSum should return 0 for an empty tensor. (See https://github.com/onnx/onnx/issues/3651#issuecomment-1454338883) ### Summary ReduceSum ({}) = 0 ReduceProd ({}) = 1 ReduceMin ({}) = Max. value of datatype ReduceMax ({}) = Min. value of datatype ReduceLogSum ({}) = minus infinity or undefined for datatypes without minus infinity ReduceLogSumExp ({}) = minus infinity or undefined for datatypes without minus infinity ReduceMean ({}) = Undefined --------- Signed-off-by: Ganesan Ramalingam Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: gramalingam --- docs/Changelog.md | 101 ++++-- docs/Operators.md | 306 +++++++++++++++++- docs/TestCoverage.md | 274 +++++++++++++++- onnx/backend/test/case/node/reduce_log_sum.py | 25 ++ .../test/case/node/reduce_log_sum_exp.py | 25 ++ onnx/backend/test/case/node/reducel1.py | 24 ++ onnx/backend/test/case/node/reducel2.py | 24 ++ onnx/backend/test/case/node/reducemin.py | 26 ++ onnx/backend/test/case/node/reduceprod.py | 24 ++ onnx/backend/test/case/node/reducesum.py | 50 +++ .../backend/test/case/node/reducesumsquare.py | 24 ++ onnx/defs/reduction/defs.cc | 30 +- onnx/defs/reduction/old.cc | 61 ++-- onnx/defs/reduction/utils.cc | 8 +- onnx/defs/reduction/utils.h | 24 +- onnx/reference/ops/_op.py | 8 + onnx/reference/ops/op_reduce_log_sum.py | 5 + onnx/reference/ops/op_reduce_log_sum_exp.py | 6 + onnx/reference/ops/op_reduce_max.py | 16 + onnx/reference/ops/op_reduce_min.py | 16 + onnx/test/shape_inference_test.py | 28 ++ 
onnx/test/test_backend_onnxruntime.py | 16 + 22 files changed, 1027 insertions(+), 94 deletions(-) diff --git a/docs/Changelog.md b/docs/Changelog.md index 1047c8b99b5..ffb2175d7e7 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -3193,7 +3193,8 @@ This version of the operator has been available since version 1 of the default O Computes the L1 norm of the input tensor's element along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then - the resulted tensor have the reduced dimension pruned. + the resulted tensor have the reduced dimension pruned. Input tensors of rank zero are + valid. Reduction over an empty set of values yields 0. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True. @@ -3236,7 +3237,8 @@ This version of the operator has been available since version 1 of the default O Computes the L2 norm of the input tensor's element along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then - the resulted tensor have the reduced dimension pruned. + the resulted tensor have the reduced dimension pruned. Input tensors of rank zero are + valid. Reduction over an empty set of values yields 0. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True. @@ -3279,7 +3281,8 @@ This version of the operator has been available since version 1 of the default O Computes the log sum of the input tensor's element along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then - the resulted tensor have the reduced dimension pruned. + the resulted tensor have the reduced dimension pruned. Input tensors of rank zero are + valid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or undefined otherwise. 
The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True. @@ -3322,7 +3325,8 @@ This version of the operator has been available since version 1 of the default O Computes the log sum exponent of the input tensor's element along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then - the resulted tensor have the reduced dimension pruned. + the resulted tensor have the reduced dimension pruned. Input tensors of rank zero are + valid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or undefined otherwise. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True. @@ -3365,7 +3369,8 @@ This version of the operator has been available since version 1 of the default O Computes the max of the input tensor's element along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then - the resulted tensor have the reduced dimension pruned. + the resulted tensor have the reduced dimension pruned. Input tensors of rank zero are + valid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or the minimum value of the data type otherwise. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True. @@ -3408,7 +3413,8 @@ This version of the operator has been available since version 1 of the default O Computes the mean of the input tensor's element along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then - the resulted tensor have the reduced dimension pruned. + the resulted tensor have the reduced dimension pruned. Input tensors of rank zero are + valid. Reduction over an empty set of values yields undefined. 
The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True. @@ -3451,7 +3457,8 @@ This version of the operator has been available since version 1 of the default O Computes the min of the input tensor's element along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then - the resulted tensor have the reduced dimension pruned. + the resulted tensor have the reduced dimension pruned. Input tensors of rank zero are + valid. Reduction over an empty set of values yields plus infinity (if supported by the datatype) or the maximum value of the data type otherwise. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True. @@ -3494,7 +3501,8 @@ This version of the operator has been available since version 1 of the default O Computes the product of the input tensor's element along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then - the resulted tensor have the reduced dimension pruned. + the resulted tensor have the reduced dimension pruned. Input tensors of rank zero are + valid. Reduction over an empty set of values yields 1. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True. @@ -3537,7 +3545,8 @@ This version of the operator has been available since version 1 of the default O Computes the sum of the input tensor's element along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then - the resulted tensor have the reduced dimension pruned. + the resulted tensor have the reduced dimension pruned. Input tensors of rank zero are + valid. Reduction over an empty set of values yields 0. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True. 
@@ -3580,7 +3589,8 @@ This version of the operator has been available since version 1 of the default O Computes the sum square of the input tensor's element along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then - the resulted tensor have the reduced dimension pruned. + the resulted tensor have the reduced dimension pruned. Input tensors of rank zero are + valid. Reduction over an empty set of values yields 0. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True. @@ -10293,6 +10303,7 @@ This version of the operator has been deprecated since version 10 of the default Computes the indices of the max elements of the input tensor's element along the provided axis. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then the resulting tensor has the reduced dimension pruned. + The input tensor must not be empty. The type of the output tensor is integer. #### Version @@ -10334,6 +10345,7 @@ This version of the operator has been available since version 11 of the default Computes the indices of the min elements of the input tensor's element along the provided axis. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then the resulting tensor has the reduced dimension pruned. + The input tensor must not be empty. The type of the output tensor is integer. #### Version @@ -12408,7 +12420,8 @@ This version of the operator has been available since version 11 of the default Computes the max of the input tensor's element along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then - the resulted tensor have the reduced dimension pruned. + the resulted tensor have the reduced dimension pruned. Input tensors of rank zero are + valid. 
Reduction over an empty set of values yields minus infinity (if supported by the datatype) or the minimum value of the data type otherwise. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True. @@ -12494,7 +12507,8 @@ This version of the operator has been available since version 11 of the default Computes the min of the input tensor's element along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then - the resulted tensor have the reduced dimension pruned. + the resulted tensor have the reduced dimension pruned. Input tensors of rank zero are + valid. Reduction over an empty set of values yields plus infinity (if supported by the datatype) or the maximum value of the data type otherwise. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True. @@ -17222,7 +17236,8 @@ This version of the operator has been available since version 13 of the default Computes the L1 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields 0. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -17266,7 +17281,8 @@ This version of the operator has been available since version 13 of the default Computes the L2 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields 0. 
+ The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -17310,7 +17326,8 @@ This version of the operator has been available since version 13 of the default Computes the log sum of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or undefined otherwise. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -17354,7 +17371,8 @@ This version of the operator has been available since version 13 of the default Computes the log sum exponent of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or undefined otherwise. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -17398,7 +17416,8 @@ This version of the operator has been available since version 13 of the default Computes the max of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or the minimum value of the data type otherwise. 
+ The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -17442,7 +17461,8 @@ This version of the operator has been available since version 13 of the default Computes the mean of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields undefined. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -17486,7 +17506,8 @@ This version of the operator has been available since version 13 of the default Computes the min of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields plus infinity (if supported by the datatype) or the maximum value of the data type otherwise. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -17530,7 +17551,8 @@ This version of the operator has been available since version 13 of the default Computes the product of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields 1. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. 
@@ -17574,7 +17596,8 @@ This version of the operator has been available since version 13 of the default Computes the sum of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields 0. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -17620,7 +17643,8 @@ This version of the operator has been available since version 13 of the default Computes the sum square of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields 0. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -21801,7 +21825,8 @@ This version of the operator has been available since version 18 of the default Computes the L1 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields 0. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -21847,7 +21872,8 @@ This version of the operator has been available since version 18 of the default Computes the L2 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. 
If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields 0. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -21893,7 +21919,8 @@ This version of the operator has been available since version 18 of the default Computes the log sum of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or undefined otherwise. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -21939,7 +21966,8 @@ This version of the operator has been available since version 18 of the default Computes the log sum exponent of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or undefined otherwise. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -21985,7 +22013,8 @@ This version of the operator has been available since version 18 of the default Computes the max of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. 
Reduction over an empty set of values yields minus infinity (if supported by the datatype) or the minimum value of the data type otherwise. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -22031,7 +22060,8 @@ This version of the operator has been available since version 18 of the default Computes the mean of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields undefined. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -22077,7 +22107,8 @@ This version of the operator has been available since version 18 of the default Computes the min of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields plus infinity (if supported by the datatype) or the maximum value of the data type otherwise. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -22123,7 +22154,8 @@ This version of the operator has been available since version 18 of the default Computes the product of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields 1. 
+ The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -22169,7 +22201,8 @@ This version of the operator has been available since version 18 of the default Computes the sum square of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields 0. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -24288,7 +24321,8 @@ This version of the operator has been available since version 20 of the default Computes the max of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or the minimum value of the data type otherwise. + If the input data type is Boolean, the comparison should consider `False < True`. @@ -24336,7 +24370,8 @@ This version of the operator has been available since version 20 of the default Computes the min of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields plus infinity (if supported by the datatype) or the maximum value of the data type otherwise. + If the input data type is Boolean, the comparison should consider `False < True`. 
diff --git a/docs/Operators.md b/docs/Operators.md index a639ebda524..3173b32c4c4 100644 --- a/docs/Operators.md +++ b/docs/Operators.md @@ -20726,7 +20726,8 @@ expect(node, inputs=[x], outputs=[y], name="test_reciprocal") Computes the L1 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields 0. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -20859,6 +20860,36 @@ expect( +
+empty_set + +```python +shape = [2, 0, 4] +keepdims = 1 +reduced_shape = [2, 1, 4] + +node = onnx.helper.make_node( + "ReduceL1", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, +) + +data = np.array([], dtype=np.float32).reshape(shape) +axes = np.array([1], dtype=np.int64) +reduced = np.array(np.zeros(reduced_shape, dtype=np.float32)) + +expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_l1_empty_set", +) +``` + +
+ +
keepdims @@ -20954,7 +20985,8 @@ expect( Computes the L2 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields 0. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -21093,6 +21125,36 @@ expect(
+
+empty_set + +```python +shape = [2, 0, 4] +keepdims = 1 +reduced_shape = [2, 1, 4] + +node = onnx.helper.make_node( + "ReduceL2", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, +) + +data = np.array([], dtype=np.float32).reshape(shape) +axes = np.array([1], dtype=np.int64) +reduced = np.array(np.zeros(reduced_shape, dtype=np.float32)) + +expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_l2_empty_set", +) +``` + +
+ +
keepdims @@ -21200,7 +21262,8 @@ expect( Computes the log sum of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or undefined otherwise. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -21246,6 +21309,37 @@ Other versions of this operator: 1, +empty_set + +```python +shape = [2, 0, 4] +keepdims = 1 +reduced_shape = [2, 1, 4] + +node = onnx.helper.make_node( + "ReduceLogSum", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, +) + +data = np.array([], dtype=np.float32).reshape(shape) +axes = np.array([1], dtype=np.int64) +zero = np.array(np.zeros(reduced_shape, dtype=np.float32)) +reduced = np.log(zero) # -inf + +expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_log_sum_empty_set", +) +``` + +
+ +
keepdims @@ -21336,7 +21430,8 @@ expect( Computes the log sum exponent of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or undefined otherwise. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -21470,6 +21565,37 @@ expect(
+
+empty_set + +```python +shape = [2, 0, 4] +keepdims = 1 +reduced_shape = [2, 1, 4] + +node = onnx.helper.make_node( + "ReduceLogSumExp", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, +) + +data = np.array([], dtype=np.float32).reshape(shape) +axes = np.array([1], dtype=np.int64) +zero = np.array(np.zeros(reduced_shape, dtype=np.float32)) +reduced = np.log(zero) # -inf + +expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_log_sum_exp_empty_set", +) +``` + +
+ +
keepdims @@ -21567,7 +21693,8 @@ expect( Computes the max of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields minus infinity (if supported by the datatype) or the minimum value of the data type otherwise. + If the input data type is Boolean, the comparison should consider `False < True`. @@ -21843,7 +21970,8 @@ expect( Computes the mean of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields undefined. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -22080,7 +22208,8 @@ expect( Computes the min of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields plus infinity (if supported by the datatype) or the maximum value of the data type otherwise. + If the input data type is Boolean, the comparison should consider `False < True`. @@ -22256,6 +22385,38 @@ expect(
+
+empty_set + +```python +shape = [2, 0, 4] +keepdims = 1 +reduced_shape = [2, 1, 4] + +node = onnx.helper.make_node( + "ReduceMin", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, +) + +data = np.array([], dtype=np.float32).reshape(shape) +axes = np.array([1], dtype=np.int64) +one = np.array(np.ones(reduced_shape, dtype=np.float32)) +zero = np.array(np.zeros(reduced_shape, dtype=np.float32)) +reduced = one / zero # inf + +expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_min_empty_set", +) +``` + +
+ +
keepdims @@ -22359,7 +22520,8 @@ expect( Computes the product of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields 1. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -22490,6 +22652,36 @@ expect(
+
+empty_set + +```python +shape = [2, 0, 4] +keepdims = 1 +reduced_shape = [2, 1, 4] + +node = onnx.helper.make_node( + "ReduceProd", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, +) + +data = np.array([], dtype=np.float32).reshape(shape) +axes = np.array([1], dtype=np.int64) +reduced = np.array(np.ones(reduced_shape, dtype=np.float32)) + +expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_prod_empty_set", +) +``` + +
+ +
keepdims @@ -22585,7 +22777,8 @@ expect( Computes the sum of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields 0. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -22760,6 +22953,37 @@ expect(
+
+empty_set + +```python +"""Test case with the reduced-axis of size zero.""" +shape = [2, 0, 4] +keepdims = 1 +reduced_shape = [2, 1, 4] + +node = onnx.helper.make_node( + "ReduceSum", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, +) + +data = np.array([], dtype=np.float32).reshape(shape) +axes = np.array([1], dtype=np.int64) +reduced = np.array(np.zeros(reduced_shape, dtype=np.float32)) + +expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_sum_empty_set", +) +``` + +
+ +
keepdims @@ -22846,12 +23070,44 @@ expect(
+
+non_reduced_axis_zero + +```python +"""Test case with the non-reduced-axis of size zero.""" +shape = [2, 0, 4] +keepdims = 1 +reduced_shape = [2, 0, 1] + +node = onnx.helper.make_node( + "ReduceSum", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, +) + +data = np.array([], dtype=np.float32).reshape(shape) +axes = np.array([2], dtype=np.int64) +reduced = np.array([], dtype=np.float32).reshape(reduced_shape) + +expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_sum_empty_set_non_reduced_axis_zero", +) +``` + +
+ + ###
**ReduceSumSquare** Computes the sum square of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. + valid. Reduction over an empty set of values yields 0. + The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. @@ -22987,6 +23243,36 @@ expect( +
+empty_set + +```python +shape = [2, 0, 4] +keepdims = 1 +reduced_shape = [2, 1, 4] + +node = onnx.helper.make_node( + "ReduceSumSquare", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, +) + +data = np.array([], dtype=np.float32).reshape(shape) +axes = np.array([1], dtype=np.int64) +reduced = np.array(np.zeros(reduced_shape, dtype=np.float32)) + +expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_sum_square_empty_set", +) +``` + +
+ +
keepdims diff --git a/docs/TestCoverage.md b/docs/TestCoverage.md index 7589b3d1598..b8cf5520ef9 100644 --- a/docs/TestCoverage.md +++ b/docs/TestCoverage.md @@ -13837,7 +13837,7 @@ expect(node, inputs=[x], outputs=[y], name="test_reciprocal") ### ReduceL1 -There are 4 test cases, listed as following: +There are 5 test cases, listed as following:
default_axes_keepdims @@ -13920,6 +13920,34 @@ expect( ) ``` +
+
+empty_set + +```python +shape = [2, 0, 4] +keepdims = 1 +reduced_shape = [2, 1, 4] + +node = onnx.helper.make_node( + "ReduceL1", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, +) + +data = np.array([], dtype=np.float32).reshape(shape) +axes = np.array([1], dtype=np.int64) +reduced = np.array(np.zeros(reduced_shape, dtype=np.float32)) + +expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_l1_empty_set", +) +``` +
keepdims @@ -14010,7 +14038,7 @@ expect( ### ReduceL2 -There are 4 test cases, listed as following: +There are 5 test cases, listed as following:
default_axes_keepdims @@ -14099,6 +14127,34 @@ expect( ) ``` +
+
+empty_set + +```python +shape = [2, 0, 4] +keepdims = 1 +reduced_shape = [2, 1, 4] + +node = onnx.helper.make_node( + "ReduceL2", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, +) + +data = np.array([], dtype=np.float32).reshape(shape) +axes = np.array([1], dtype=np.int64) +reduced = np.array(np.zeros(reduced_shape, dtype=np.float32)) + +expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_l2_empty_set", +) +``` +
keepdims @@ -14201,7 +14257,36 @@ expect( ### ReduceLogSum -There are 3 test cases, listed as following: +There are 4 test cases, listed as following: +
+empty_set + +```python +shape = [2, 0, 4] +keepdims = 1 +reduced_shape = [2, 1, 4] + +node = onnx.helper.make_node( + "ReduceLogSum", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, +) + +data = np.array([], dtype=np.float32).reshape(shape) +axes = np.array([1], dtype=np.int64) +zero = np.array(np.zeros(reduced_shape, dtype=np.float32)) +reduced = np.log(zero) # -inf + +expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_log_sum_empty_set", +) +``` + +
keepdims @@ -14284,7 +14369,7 @@ expect( ### ReduceLogSumExp -There are 4 test cases, listed as following: +There are 5 test cases, listed as following:
default_axes_keepdims @@ -14368,6 +14453,35 @@ expect( ) ``` +
+
+empty_set + +```python +shape = [2, 0, 4] +keepdims = 1 +reduced_shape = [2, 1, 4] + +node = onnx.helper.make_node( + "ReduceLogSumExp", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, +) + +data = np.array([], dtype=np.float32).reshape(shape) +axes = np.array([1], dtype=np.int64) +zero = np.array(np.zeros(reduced_shape, dtype=np.float32)) +reduced = np.log(zero) # -inf + +expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_log_sum_exp_empty_set", +) +``` +
keepdims @@ -14859,7 +14973,7 @@ expect( ### ReduceMin -There are 5 test cases, listed as following: +There are 6 test cases, listed as following:
bool_inputs @@ -14981,6 +15095,36 @@ expect( ) ``` +
+
+empty_set + +```python +shape = [2, 0, 4] +keepdims = 1 +reduced_shape = [2, 1, 4] + +node = onnx.helper.make_node( + "ReduceMin", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, +) + +data = np.array([], dtype=np.float32).reshape(shape) +axes = np.array([1], dtype=np.int64) +one = np.array(np.ones(reduced_shape, dtype=np.float32)) +zero = np.array(np.zeros(reduced_shape, dtype=np.float32)) +reduced = one / zero # inf + +expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_min_empty_set", +) +``` +
keepdims @@ -15079,7 +15223,7 @@ expect( ### ReduceProd -There are 4 test cases, listed as following: +There are 5 test cases, listed as following:
default_axes_keepdims @@ -15160,6 +15304,34 @@ expect( ) ``` +
+
+empty_set + +```python +shape = [2, 0, 4] +keepdims = 1 +reduced_shape = [2, 1, 4] + +node = onnx.helper.make_node( + "ReduceProd", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, +) + +data = np.array([], dtype=np.float32).reshape(shape) +axes = np.array([1], dtype=np.int64) +reduced = np.array(np.ones(reduced_shape, dtype=np.float32)) + +expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_prod_empty_set", +) +``` +
keepdims @@ -15250,7 +15422,7 @@ expect( ### ReduceSum -There are 5 test cases, listed as following: +There are 7 test cases, listed as following:
default_axes_keepdims @@ -15373,6 +15545,35 @@ expect( ) ``` +
+
+empty_set + +```python +"""Test case with the reduced-axis of size zero.""" +shape = [2, 0, 4] +keepdims = 1 +reduced_shape = [2, 1, 4] + +node = onnx.helper.make_node( + "ReduceSum", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, +) + +data = np.array([], dtype=np.float32).reshape(shape) +axes = np.array([1], dtype=np.int64) +reduced = np.array(np.zeros(reduced_shape, dtype=np.float32)) + +expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_sum_empty_set", +) +``` +
keepdims @@ -15455,11 +15656,40 @@ expect( ) ``` +
+
+non_reduced_axis_zero + +```python +"""Test case with the non-reduced-axis of size zero.""" +shape = [2, 0, 4] +keepdims = 1 +reduced_shape = [2, 0, 1] + +node = onnx.helper.make_node( + "ReduceSum", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, +) + +data = np.array([], dtype=np.float32).reshape(shape) +axes = np.array([2], dtype=np.int64) +reduced = np.array([], dtype=np.float32).reshape(reduced_shape) + +expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_sum_empty_set_non_reduced_axis_zero", +) +``` +
### ReduceSumSquare -There are 4 test cases, listed as following: +There are 5 test cases, listed as following:
default_axes_keepdims @@ -15545,6 +15775,34 @@ expect( ) ``` +
+
+empty_set + +```python +shape = [2, 0, 4] +keepdims = 1 +reduced_shape = [2, 1, 4] + +node = onnx.helper.make_node( + "ReduceSumSquare", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, +) + +data = np.array([], dtype=np.float32).reshape(shape) +axes = np.array([1], dtype=np.int64) +reduced = np.array(np.zeros(reduced_shape, dtype=np.float32)) + +expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_sum_square_empty_set", +) +``` +
keepdims diff --git a/onnx/backend/test/case/node/reduce_log_sum.py b/onnx/backend/test/case/node/reduce_log_sum.py index f9af72fa609..a76bf435bce 100644 --- a/onnx/backend/test/case/node/reduce_log_sum.py +++ b/onnx/backend/test/case/node/reduce_log_sum.py @@ -76,3 +76,28 @@ def export_negative_axes_keepdims() -> None: outputs=[reduced], name="test_reduce_log_sum_negative_axes", ) + + @staticmethod + def export_empty_set() -> None: + shape = [2, 0, 4] + keepdims = 1 + reduced_shape = [2, 1, 4] + + node = onnx.helper.make_node( + "ReduceLogSum", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, + ) + + data = np.array([], dtype=np.float32).reshape(shape) + axes = np.array([1], dtype=np.int64) + zero = np.array(np.zeros(reduced_shape, dtype=np.float32)) + reduced = np.log(zero) # -inf + + expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_log_sum_empty_set", + ) diff --git a/onnx/backend/test/case/node/reduce_log_sum_exp.py b/onnx/backend/test/case/node/reduce_log_sum_exp.py index 2352665d792..a691b16979d 100644 --- a/onnx/backend/test/case/node/reduce_log_sum_exp.py +++ b/onnx/backend/test/case/node/reduce_log_sum_exp.py @@ -165,3 +165,28 @@ def export_negative_axes_keepdims() -> None: outputs=[reduced], name="test_reduce_log_sum_exp_negative_axes_keepdims_random", ) + + @staticmethod + def export_empty_set() -> None: + shape = [2, 0, 4] + keepdims = 1 + reduced_shape = [2, 1, 4] + + node = onnx.helper.make_node( + "ReduceLogSumExp", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, + ) + + data = np.array([], dtype=np.float32).reshape(shape) + axes = np.array([1], dtype=np.int64) + zero = np.array(np.zeros(reduced_shape, dtype=np.float32)) + reduced = np.log(zero) # -inf + + expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_log_sum_exp_empty_set", + ) diff --git a/onnx/backend/test/case/node/reducel1.py b/onnx/backend/test/case/node/reducel1.py index 
c88be5a521e..cb57987858d 100644 --- a/onnx/backend/test/case/node/reducel1.py +++ b/onnx/backend/test/case/node/reducel1.py @@ -162,3 +162,27 @@ def export_negative_axes_keepdims() -> None: outputs=[reduced], name="test_reduce_l1_negative_axes_keep_dims_random", ) + + @staticmethod + def export_empty_set() -> None: + shape = [2, 0, 4] + keepdims = 1 + reduced_shape = [2, 1, 4] + + node = onnx.helper.make_node( + "ReduceL1", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, + ) + + data = np.array([], dtype=np.float32).reshape(shape) + axes = np.array([1], dtype=np.int64) + reduced = np.array(np.zeros(reduced_shape, dtype=np.float32)) + + expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_l1_empty_set", + ) diff --git a/onnx/backend/test/case/node/reducel2.py b/onnx/backend/test/case/node/reducel2.py index 71de8eb9efe..f454a97460e 100644 --- a/onnx/backend/test/case/node/reducel2.py +++ b/onnx/backend/test/case/node/reducel2.py @@ -180,3 +180,27 @@ def export_negative_axes_keepdims() -> None: outputs=[reduced], name="test_reduce_l2_negative_axes_keep_dims_random", ) + + @staticmethod + def export_empty_set() -> None: + shape = [2, 0, 4] + keepdims = 1 + reduced_shape = [2, 1, 4] + + node = onnx.helper.make_node( + "ReduceL2", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, + ) + + data = np.array([], dtype=np.float32).reshape(shape) + axes = np.array([1], dtype=np.int64) + reduced = np.array(np.zeros(reduced_shape, dtype=np.float32)) + + expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_l2_empty_set", + ) diff --git a/onnx/backend/test/case/node/reducemin.py b/onnx/backend/test/case/node/reducemin.py index 1b924c6d18a..2f9bfe3c7fe 100644 --- a/onnx/backend/test/case/node/reducemin.py +++ b/onnx/backend/test/case/node/reducemin.py @@ -205,3 +205,29 @@ def export_bool_inputs() -> None: outputs=[reduced], name="test_reduce_min_bool_inputs", ) + + @staticmethod + def 
export_empty_set() -> None: + shape = [2, 0, 4] + keepdims = 1 + reduced_shape = [2, 1, 4] + + node = onnx.helper.make_node( + "ReduceMin", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, + ) + + data = np.array([], dtype=np.float32).reshape(shape) + axes = np.array([1], dtype=np.int64) + one = np.array(np.ones(reduced_shape, dtype=np.float32)) + zero = np.array(np.zeros(reduced_shape, dtype=np.float32)) + reduced = one / zero # inf + + expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_min_empty_set", + ) diff --git a/onnx/backend/test/case/node/reduceprod.py b/onnx/backend/test/case/node/reduceprod.py index e80c34e3d94..74e9b1188af 100644 --- a/onnx/backend/test/case/node/reduceprod.py +++ b/onnx/backend/test/case/node/reduceprod.py @@ -160,3 +160,27 @@ def export_negative_axes_keepdims() -> None: outputs=[reduced], name="test_reduce_prod_negative_axes_keepdims_random", ) + + @staticmethod + def export_empty_set() -> None: + shape = [2, 0, 4] + keepdims = 1 + reduced_shape = [2, 1, 4] + + node = onnx.helper.make_node( + "ReduceProd", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, + ) + + data = np.array([], dtype=np.float32).reshape(shape) + axes = np.array([1], dtype=np.int64) + reduced = np.array(np.ones(reduced_shape, dtype=np.float32)) + + expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_prod_empty_set", + ) diff --git a/onnx/backend/test/case/node/reducesum.py b/onnx/backend/test/case/node/reducesum.py index 8458f99cd2d..04e7d6e0256 100644 --- a/onnx/backend/test/case/node/reducesum.py +++ b/onnx/backend/test/case/node/reducesum.py @@ -194,3 +194,53 @@ def export_empty_axes_input_noop() -> None: outputs=[reduced], name="test_reduce_sum_negative_axes_keepdims_random", ) + + @staticmethod + def export_empty_set() -> None: + """Test case with the reduced-axis of size zero.""" + shape = [2, 0, 4] + keepdims = 1 + reduced_shape = [2, 1, 4] + + node = 
onnx.helper.make_node( + "ReduceSum", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, + ) + + data = np.array([], dtype=np.float32).reshape(shape) + axes = np.array([1], dtype=np.int64) + reduced = np.array(np.zeros(reduced_shape, dtype=np.float32)) + + expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_sum_empty_set", + ) + + @staticmethod + def export_non_reduced_axis_zero() -> None: + """Test case with the non-reduced-axis of size zero.""" + shape = [2, 0, 4] + keepdims = 1 + reduced_shape = [2, 0, 1] + + node = onnx.helper.make_node( + "ReduceSum", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, + ) + + data = np.array([], dtype=np.float32).reshape(shape) + axes = np.array([2], dtype=np.int64) + reduced = np.array([], dtype=np.float32).reshape(reduced_shape) + + expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_sum_empty_set_non_reduced_axis_zero", + ) diff --git a/onnx/backend/test/case/node/reducesumsquare.py b/onnx/backend/test/case/node/reducesumsquare.py index 3d3d995688a..95df56910af 100644 --- a/onnx/backend/test/case/node/reducesumsquare.py +++ b/onnx/backend/test/case/node/reducesumsquare.py @@ -167,3 +167,27 @@ def export_negative_axes_keepdims() -> None: outputs=[reduced], name="test_reduce_sum_square_negative_axes_keepdims_random", ) + + @staticmethod + def export_empty_set() -> None: + shape = [2, 0, 4] + keepdims = 1 + reduced_shape = [2, 1, 4] + + node = onnx.helper.make_node( + "ReduceSumSquare", + inputs=["data", "axes"], + outputs=["reduced"], + keepdims=keepdims, + ) + + data = np.array([], dtype=np.float32).reshape(shape) + axes = np.array([1], dtype=np.int64) + reduced = np.array(np.zeros(reduced_shape, dtype=np.float32)) + + expect( + node, + inputs=[data, axes], + outputs=[reduced], + name="test_reduce_sum_square_empty_set", + ) diff --git a/onnx/defs/reduction/defs.cc b/onnx/defs/reduction/defs.cc index fee317be308..e7d06a7f1aa 100644 ---
a/onnx/defs/reduction/defs.cc +++ b/onnx/defs/reduction/defs.cc @@ -12,19 +12,17 @@ namespace ONNX_NAMESPACE { -std::function ReduceDocGeneratorWithFunctionBody(const char* name, const char* func_body) { - return ReduceDocGenerator_opset13_20(name, false, true, func_body); -} - ONNX_OPERATOR_SET_SCHEMA( ReduceMax, 20, - OpSchema().FillUsing(ReduceDocGenerator_opset13_20("max", true, true, nullptr, nullptr, true))); + OpSchema().FillUsing(ReduceOpGenerator("max", EMPTY_MIN, true, true, nullptr, nullptr, true))); + ONNX_OPERATOR_SET_SCHEMA( ReduceMin, 20, - OpSchema().FillUsing(ReduceDocGenerator_opset13_20("min", true, true, nullptr, nullptr, true))); -ONNX_OPERATOR_SET_SCHEMA(ReduceSum, 13, OpSchema().FillUsing(ReduceDocGenerator_opset13_20("sum", false, true))); + OpSchema().FillUsing(ReduceOpGenerator("min", EMPTY_MAX, true, true, nullptr, nullptr, true))); + +ONNX_OPERATOR_SET_SCHEMA(ReduceSum, 13, OpSchema().FillUsing(ReduceOpDynamicAxes("sum", EMPTY_ZERO))); const char* reduce_sum_square_func_body = R"ONNX( { @@ -36,11 +34,11 @@ const char* reduce_sum_square_func_body = R"ONNX( ONNX_OPERATOR_SET_SCHEMA( ReduceSumSquare, 18, - OpSchema().FillUsing(ReduceDocGeneratorWithFunctionBody("sum square", reduce_sum_square_func_body))); + OpSchema().FillUsing(ReduceFunctionOp("sum square", EMPTY_ZERO, reduce_sum_square_func_body))); -ONNX_OPERATOR_SET_SCHEMA(ReduceMean, 18, OpSchema().FillUsing(ReduceDocGenerator_opset13_20("mean", false, true))); +ONNX_OPERATOR_SET_SCHEMA(ReduceMean, 18, OpSchema().FillUsing(ReduceOpDynamicAxes("mean", EMPTY_UNDEFINED))); -ONNX_OPERATOR_SET_SCHEMA(ReduceProd, 18, OpSchema().FillUsing(ReduceDocGenerator_opset13_20("product", false, true))); +ONNX_OPERATOR_SET_SCHEMA(ReduceProd, 18, OpSchema().FillUsing(ReduceOpDynamicAxes("product", EMPTY_ONE))); const char* reduce_log_sum_func_body = R"ONNX( { @@ -48,10 +46,11 @@ const char* reduce_log_sum_func_body = R"ONNX( reduced = Log (reduced_sum) } )ONNX"; + ONNX_OPERATOR_SET_SCHEMA( 
ReduceLogSum, 18, - OpSchema().FillUsing(ReduceDocGeneratorWithFunctionBody("log sum", reduce_log_sum_func_body))); + OpSchema().FillUsing(ReduceFunctionOp("log sum", EMPTY_MINUS_INF, reduce_log_sum_func_body))); const char* reduce_log_sum_exp_func_body = R"ONNX( { @@ -62,10 +61,11 @@ const char* reduce_log_sum_exp_func_body = R"ONNX( reduced = CastLike(reduced_double, data) } )ONNX"; + ONNX_OPERATOR_SET_SCHEMA( ReduceLogSumExp, 18, - OpSchema().FillUsing(ReduceDocGeneratorWithFunctionBody("log sum exponent", reduce_log_sum_exp_func_body))); + OpSchema().FillUsing(ReduceFunctionOp("log sum exponent", EMPTY_MINUS_INF, reduce_log_sum_exp_func_body))); const char* reduce_l1_func_body = R"ONNX( { @@ -73,10 +73,11 @@ const char* reduce_l1_func_body = R"ONNX( reduced = ReduceSum(data_abs, axes) } )ONNX"; + ONNX_OPERATOR_SET_SCHEMA( ReduceL1, 18, - OpSchema().FillUsing(ReduceDocGeneratorWithFunctionBody("L1 norm", reduce_l1_func_body))); + OpSchema().FillUsing(ReduceFunctionOp("L1 norm", EMPTY_ZERO, reduce_l1_func_body))); const char* reduce_l2_func_body = R"ONNX( { @@ -87,10 +88,11 @@ const char* reduce_l2_func_body = R"ONNX( reduced = CastLike(sqrt, data) } )ONNX"; + ONNX_OPERATOR_SET_SCHEMA( ReduceL2, 18, - OpSchema().FillUsing(ReduceDocGeneratorWithFunctionBody("L2 norm", reduce_l2_func_body))); + OpSchema().FillUsing(ReduceFunctionOp("L2 norm", EMPTY_ZERO, reduce_l2_func_body))); std::function ArgReduceDocGenerator(const char* name) { return [=](OpSchema& schema) { diff --git a/onnx/defs/reduction/old.cc b/onnx/defs/reduction/old.cc index 22f4031832f..5e3ed36bc5f 100644 --- a/onnx/defs/reduction/old.cc +++ b/onnx/defs/reduction/old.cc @@ -194,17 +194,19 @@ ONNX_OPERATOR_SET_SCHEMA(ArgMax, 12, OpSchema().FillUsing(ArgReduceDocGenerator_ ONNX_OPERATOR_SET_SCHEMA(ArgMin, 12, OpSchema().FillUsing(ArgReduceDocGenerator_opset12("min"))); -std::function ReduceDocGenerator_opset1(const char* name, int opset = 1) { +std::function ReduceDocGenerator_opset1(const char* name, 
const char* empty_value, int opset = 1) { return [=](OpSchema& schema) { std::string doc; POPULATE_OP_DOC_STR(doc = R"DOC( Computes the {name} of the input tensor's element along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then -the resulted tensor have the reduced dimension pruned. +the resulted tensor have the reduced dimension pruned. Input tensors of rank zero are +valid. Reduction over an empty set of values yields {empty_value}. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.)DOC"; ReplaceAll(doc, "{name}", name);); + ReplaceAll(doc, "{empty_value}", empty_value); schema.SetDoc(doc.c_str()); schema.Attr( "axes", @@ -265,29 +267,32 @@ False instead of True.)DOC"; }; } -ONNX_OPERATOR_SET_SCHEMA(ReduceMax, 1, OpSchema().FillUsing(ReduceDocGenerator_opset1("max"))); +ONNX_OPERATOR_SET_SCHEMA(ReduceMax, 1, OpSchema().FillUsing(ReduceDocGenerator_opset1("max", EMPTY_MIN))); -ONNX_OPERATOR_SET_SCHEMA(ReduceMin, 1, OpSchema().FillUsing(ReduceDocGenerator_opset1("min"))); +ONNX_OPERATOR_SET_SCHEMA(ReduceMin, 1, OpSchema().FillUsing(ReduceDocGenerator_opset1("min", EMPTY_MAX))); -ONNX_OPERATOR_SET_SCHEMA(ReduceSum, 1, OpSchema().FillUsing(ReduceDocGenerator_opset1("sum"))); +ONNX_OPERATOR_SET_SCHEMA(ReduceSum, 1, OpSchema().FillUsing(ReduceDocGenerator_opset1("sum", EMPTY_ZERO))); -ONNX_OPERATOR_SET_SCHEMA(ReduceSumSquare, 1, OpSchema().FillUsing(ReduceDocGenerator_opset1("sum square"))); +ONNX_OPERATOR_SET_SCHEMA(ReduceSumSquare, 1, OpSchema().FillUsing(ReduceDocGenerator_opset1("sum square", EMPTY_ZERO))); -ONNX_OPERATOR_SET_SCHEMA(ReduceMean, 1, OpSchema().FillUsing(ReduceDocGenerator_opset1("mean"))); +ONNX_OPERATOR_SET_SCHEMA(ReduceMean, 1, OpSchema().FillUsing(ReduceDocGenerator_opset1("mean", EMPTY_UNDEFINED))); -ONNX_OPERATOR_SET_SCHEMA(ReduceProd, 1, OpSchema().FillUsing(ReduceDocGenerator_opset1("product"))); 
+ONNX_OPERATOR_SET_SCHEMA(ReduceProd, 1, OpSchema().FillUsing(ReduceDocGenerator_opset1("product", EMPTY_ONE))); -ONNX_OPERATOR_SET_SCHEMA(ReduceLogSum, 1, OpSchema().FillUsing(ReduceDocGenerator_opset1("log sum"))); +ONNX_OPERATOR_SET_SCHEMA(ReduceLogSum, 1, OpSchema().FillUsing(ReduceDocGenerator_opset1("log sum", EMPTY_MINUS_INF))); -ONNX_OPERATOR_SET_SCHEMA(ReduceLogSumExp, 1, OpSchema().FillUsing(ReduceDocGenerator_opset1("log sum exponent"))); +ONNX_OPERATOR_SET_SCHEMA( + ReduceLogSumExp, + 1, + OpSchema().FillUsing(ReduceDocGenerator_opset1("log sum exponent", EMPTY_MINUS_INF))); -ONNX_OPERATOR_SET_SCHEMA(ReduceL1, 1, OpSchema().FillUsing(ReduceDocGenerator_opset1("L1 norm"))); +ONNX_OPERATOR_SET_SCHEMA(ReduceL1, 1, OpSchema().FillUsing(ReduceDocGenerator_opset1("L1 norm", EMPTY_ZERO))); -ONNX_OPERATOR_SET_SCHEMA(ReduceL2, 1, OpSchema().FillUsing(ReduceDocGenerator_opset1("L2 norm"))); +ONNX_OPERATOR_SET_SCHEMA(ReduceL2, 1, OpSchema().FillUsing(ReduceDocGenerator_opset1("L2 norm", EMPTY_ZERO))); -ONNX_OPERATOR_SET_SCHEMA(ReduceMax, 11, OpSchema().FillUsing(ReduceDocGenerator_opset1("max", 11))); +ONNX_OPERATOR_SET_SCHEMA(ReduceMax, 11, OpSchema().FillUsing(ReduceDocGenerator_opset1("max", EMPTY_MIN, 11))); -ONNX_OPERATOR_SET_SCHEMA(ReduceMin, 11, OpSchema().FillUsing(ReduceDocGenerator_opset1("min", 11))); +ONNX_OPERATOR_SET_SCHEMA(ReduceMin, 11, OpSchema().FillUsing(ReduceDocGenerator_opset1("min", EMPTY_MAX, 11))); std::function ArgReduceDocGenerator_opset1(const char* name) { return [=](OpSchema& schema) { @@ -359,6 +364,7 @@ std::function ArgReduceDocGenerator_opset11(const char* name) { Computes the indices of the {name} elements of the input tensor's element along the provided axis. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equal 0, then the resulting tensor has the reduced dimension pruned. +The input tensor must not be empty. 
The type of the output tensor is integer.)DOC"; ReplaceAll(doc, "{name}", name); schema.SetDoc(doc.c_str()); @@ -422,16 +428,19 @@ The type of the output tensor is integer.)DOC"; ONNX_OPERATOR_SET_SCHEMA(ArgMax, 11, OpSchema().FillUsing(ArgReduceDocGenerator_opset11("max"))); ONNX_OPERATOR_SET_SCHEMA(ArgMin, 11, OpSchema().FillUsing(ArgReduceDocGenerator_opset11("min"))); -ONNX_OPERATOR_SET_SCHEMA(ReduceMax, 13, OpSchema().FillUsing(ReduceDocGenerator_opset13_20("max", true))); -ONNX_OPERATOR_SET_SCHEMA(ReduceMin, 13, OpSchema().FillUsing(ReduceDocGenerator_opset13_20("min", true))); -ONNX_OPERATOR_SET_SCHEMA(ReduceSumSquare, 13, OpSchema().FillUsing(ReduceDocGenerator_opset13_20("sum square"))); -ONNX_OPERATOR_SET_SCHEMA(ReduceMean, 13, OpSchema().FillUsing(ReduceDocGenerator_opset13_20("mean"))); -ONNX_OPERATOR_SET_SCHEMA(ReduceProd, 13, OpSchema().FillUsing(ReduceDocGenerator_opset13_20("product"))); -ONNX_OPERATOR_SET_SCHEMA(ReduceLogSum, 13, OpSchema().FillUsing(ReduceDocGenerator_opset13_20("log sum"))); -ONNX_OPERATOR_SET_SCHEMA(ReduceLogSumExp, 13, OpSchema().FillUsing(ReduceDocGenerator_opset13_20("log sum exponent"))); -ONNX_OPERATOR_SET_SCHEMA(ReduceL1, 13, OpSchema().FillUsing(ReduceDocGenerator_opset13_20("L1 norm"))); -ONNX_OPERATOR_SET_SCHEMA(ReduceL2, 13, OpSchema().FillUsing(ReduceDocGenerator_opset13_20("L2 norm"))); - -ONNX_OPERATOR_SET_SCHEMA(ReduceMax, 18, OpSchema().FillUsing(ReduceDocGenerator_opset13_20("max", true, true))); -ONNX_OPERATOR_SET_SCHEMA(ReduceMin, 18, OpSchema().FillUsing(ReduceDocGenerator_opset13_20("min", true, true))); +ONNX_OPERATOR_SET_SCHEMA(ReduceMax, 13, OpSchema().FillUsing(ReduceOpGenerator("max", EMPTY_MIN, true))); +ONNX_OPERATOR_SET_SCHEMA(ReduceMin, 13, OpSchema().FillUsing(ReduceOpGenerator("min", EMPTY_MAX, true))); +ONNX_OPERATOR_SET_SCHEMA(ReduceSumSquare, 13, OpSchema().FillUsing(ReduceOpGenerator("sum square", EMPTY_ZERO))); +ONNX_OPERATOR_SET_SCHEMA(ReduceMean, 13, 
OpSchema().FillUsing(ReduceOpGenerator("mean", EMPTY_UNDEFINED))); +ONNX_OPERATOR_SET_SCHEMA(ReduceProd, 13, OpSchema().FillUsing(ReduceOpGenerator("product", EMPTY_ONE))); +ONNX_OPERATOR_SET_SCHEMA(ReduceLogSum, 13, OpSchema().FillUsing(ReduceOpGenerator("log sum", EMPTY_MINUS_INF))); +ONNX_OPERATOR_SET_SCHEMA( + ReduceLogSumExp, + 13, + OpSchema().FillUsing(ReduceOpGenerator("log sum exponent", EMPTY_MINUS_INF))); +ONNX_OPERATOR_SET_SCHEMA(ReduceL1, 13, OpSchema().FillUsing(ReduceOpGenerator("L1 norm", EMPTY_ZERO))); +ONNX_OPERATOR_SET_SCHEMA(ReduceL2, 13, OpSchema().FillUsing(ReduceOpGenerator("L2 norm", EMPTY_ZERO))); + +ONNX_OPERATOR_SET_SCHEMA(ReduceMax, 18, OpSchema().FillUsing(ReduceOpGenerator("max", EMPTY_MIN, true, true))); +ONNX_OPERATOR_SET_SCHEMA(ReduceMin, 18, OpSchema().FillUsing(ReduceOpGenerator("min", EMPTY_MAX, true, true))); } // namespace ONNX_NAMESPACE diff --git a/onnx/defs/reduction/utils.cc b/onnx/defs/reduction/utils.cc index 9b4f97dad74..bb8c3804333 100644 --- a/onnx/defs/reduction/utils.cc +++ b/onnx/defs/reduction/utils.cc @@ -9,6 +9,7 @@ #include namespace ONNX_NAMESPACE { + std::vector GetSupportedDataTypesForReductionOps(bool supports8bit, bool supports_bool) { auto data_types = OpSchema::numeric_types_for_math_reduction_ir4(); if (supports8bit) { @@ -22,8 +23,9 @@ std::vector GetSupportedDataTypesForReductionOps(bool supports8bit, return data_types; } -std::function ReduceDocGenerator_opset13_20( +std::function ReduceOpGenerator( const char* name, + const char* empty_value, bool supports_8bit_datatypes, bool axes_input, const char* func_body, @@ -34,7 +36,8 @@ std::function ReduceDocGenerator_opset13_20( Computes the {name} of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are -valid.)DOC"; +valid. 
Reduction over an empty set of values yields {empty_value}. +)DOC"; if (supports_boolean_datatype) { doc += R"DOC( @@ -46,6 +49,7 @@ The above behavior is similar to numpy, with the exception that numpy defaults ` to `False` instead of `True`.)DOC"; ReplaceAll(doc, "{name}", name); + ReplaceAll(doc, "{empty_value}", empty_value); POPULATE_OP_DOC_STR(doc = doc;); schema.SetDoc(doc.c_str()); schema.Attr( diff --git a/onnx/defs/reduction/utils.h b/onnx/defs/reduction/utils.h index 91c4e605336..af6ae91030a 100644 --- a/onnx/defs/reduction/utils.h +++ b/onnx/defs/reduction/utils.h @@ -10,11 +10,33 @@ #include "onnx/defs/tensor_proto_util.h" namespace ONNX_NAMESPACE { -std::function ReduceDocGenerator_opset13_20( + +// Constants used to indicate value returned by reduction of an empty set of values. +constexpr const char* EMPTY_ZERO = "0"; +constexpr const char* EMPTY_ONE = "1"; +constexpr const char* EMPTY_UNDEFINED = "undefined"; +constexpr const char* EMPTY_MIN = + "minus infinity (if supported by the datatype) or the minimum value of the data type otherwise"; +constexpr const char* EMPTY_MAX = + "plus infinity (if supported by the datatype) or the maximum value of the data type otherwise"; +constexpr const char* EMPTY_MINUS_INF = "minus infinity (if supported by the datatype) or undefined otherwise"; + +std::function ReduceOpGenerator( const char* name, + const char* empty_value, bool supports_8bit_datatypes = false, bool axes_input = false, const char* func_body = nullptr, ContextDependentFunctionBodyBuilder function_builder = nullptr, bool supports_boolean_datatype = false); + +inline std::function ReduceOpDynamicAxes(const char* name, const char* empty_value) { + return ReduceOpGenerator(name, empty_value, false, true, nullptr, nullptr, false); +} + +inline std::function +ReduceFunctionOp(const char* name, const char* empty_value, const char* func_body) { + return ReduceOpGenerator(name, empty_value, false, true, func_body); +} + } // namespace ONNX_NAMESPACE diff 
--git a/onnx/reference/ops/_op.py b/onnx/reference/ops/_op.py index 2a719917aaa..3b93497d37a 100644 --- a/onnx/reference/ops/_op.py +++ b/onnx/reference/ops/_op.py @@ -189,3 +189,11 @@ def handle_axes(self, axes): # noqa: PLR0911 if 0 in axes.shape: return None return tuple(axes.ravel().tolist()) + + def output_shape(self, data, axes, keepdims): + return np.sum(data, axis=axes, keepdims=keepdims).shape + + def reduce_constant(self, data, const_val, axes, keepdims): + """Special case reduction where the output value is a constant.""" + output_shape = self.output_shape(data, axes, keepdims) + return (np.full(output_shape, const_val, dtype=data.dtype),) diff --git a/onnx/reference/ops/op_reduce_log_sum.py b/onnx/reference/ops/op_reduce_log_sum.py index 957cfec091b..bc955a70519 100644 --- a/onnx/reference/ops/op_reduce_log_sum.py +++ b/onnx/reference/ops/op_reduce_log_sum.py @@ -11,6 +11,8 @@ class ReduceLogSum_1(OpRunReduceNumpy): def _run(self, data, axes=None, keepdims=True): # type: ignore tax = tuple(axes) if axes is not None else None + if data.size == 0: + return self.reduce_constant(data, -np.inf, tax, keepdims) res = np.sum(data, axis=tax, keepdims=keepdims) # type: ignore[arg-type] if len(res.shape) > 0: return (np.log(res, out=res),) @@ -25,6 +27,9 @@ def _run(self, data, axes=None, keepdims=1, noop_with_empty_axes=0): # type: ig axes = self.handle_axes(axes) keepdims = keepdims != 0 # type: ignore + if data.size == 0: + return self.reduce_constant(data, -np.inf, axes, keepdims) + res = np.sum(data, axis=axes, keepdims=keepdims) if len(res.shape) > 0: return (np.log(res, out=res),) diff --git a/onnx/reference/ops/op_reduce_log_sum_exp.py b/onnx/reference/ops/op_reduce_log_sum_exp.py index 30517f3fcc8..74adea6b016 100644 --- a/onnx/reference/ops/op_reduce_log_sum_exp.py +++ b/onnx/reference/ops/op_reduce_log_sum_exp.py @@ -25,6 +25,9 @@ def compute_log_sum_exp(data, axes, keepdims): class ReduceLogSumExp_1(OpRunReduceNumpy): def _run(self, data, axes=None, 
keepdims=None): # type: ignore tax = tuple(axes) if axes is not None else None + + if data.size == 0: + return self.reduce_constant(data, -np.inf, tax, keepdims) return compute_log_sum_exp(data, tax, keepdims) @@ -36,4 +39,7 @@ def _run(self, data, axes=None, keepdims=1, noop_with_empty_axes=0): # type: ig axes = self.handle_axes(axes) keepdims = keepdims != 0 # type: ignore + if data.size == 0: + return self.reduce_constant(data, -np.inf, axes, keepdims) + return compute_log_sum_exp(data, axes, keepdims) diff --git a/onnx/reference/ops/op_reduce_max.py b/onnx/reference/ops/op_reduce_max.py index 45886390480..a35da758b40 100644 --- a/onnx/reference/ops/op_reduce_max.py +++ b/onnx/reference/ops/op_reduce_max.py @@ -11,6 +11,14 @@ class ReduceMax_1(OpRunReduceNumpy): def _run(self, data, axes=None, keepdims=None): # type: ignore axes = tuple(axes) if axes is not None else None + if data.size == 0: + minvalue = ( + np.iinfo(data.dtype).min + if np.issubdtype(data.dtype, np.integer) + else -np.inf + ) + return self.reduce_constant(data, minvalue, axes, keepdims) + res = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1) if keepdims == 0 and not isinstance(res, np.ndarray): # The runtime must return a numpy array of a single float. @@ -25,6 +33,14 @@ def _run(self, data, axes=None, keepdims: int = 1, noop_with_empty_axes: int = 0 axes = self.handle_axes(axes) keepdims = keepdims != 0 # type: ignore + if data.size == 0: + minvalue = ( + np.iinfo(data.dtype).min + if np.issubdtype(data.dtype, np.integer) + else -np.inf + ) + return self.reduce_constant(data, minvalue, axes, keepdims) + res = np.maximum.reduce(data, axis=axes, keepdims=keepdims) if keepdims == 0 and not isinstance(res, np.ndarray): # The runtime must return a numpy array of a single float. 
diff --git a/onnx/reference/ops/op_reduce_min.py b/onnx/reference/ops/op_reduce_min.py index a8dab58344a..7c89a954a7d 100644 --- a/onnx/reference/ops/op_reduce_min.py +++ b/onnx/reference/ops/op_reduce_min.py @@ -11,6 +11,14 @@ class ReduceMin_1(OpRunReduceNumpy): def _run(self, data, axes=None, keepdims=None): # type: ignore axes = tuple(axes) if axes is not None else None + if data.size == 0: + maxvalue = ( + np.iinfo(data.dtype).max + if np.issubdtype(data.dtype, np.integer) + else np.inf + ) + return self.reduce_constant(data, maxvalue, axes, keepdims) + res = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1) if keepdims == 0 and not isinstance(res, np.ndarray): # The runtime must return a numpy array of a single float. @@ -29,6 +37,14 @@ def _run(self, data, axes=None, keepdims: int = 1, noop_with_empty_axes: int = 0 axes = self.handle_axes(axes) keepdims = keepdims != 0 # type: ignore + if data.size == 0: + maxvalue = ( + np.iinfo(data.dtype).max + if np.issubdtype(data.dtype, np.integer) + else np.inf + ) + return self.reduce_constant(data, maxvalue, axes, keepdims) + res = np.minimum.reduce(data, axis=axes, keepdims=keepdims) if keepdims == 0 and not isinstance(res, np.ndarray): # The runtime must return a numpy array of a single float. 
diff --git a/onnx/test/shape_inference_test.py b/onnx/test/shape_inference_test.py index 919bcfb5cfa..9346b7c8dd9 100644 --- a/onnx/test/shape_inference_test.py +++ b/onnx/test/shape_inference_test.py @@ -2885,6 +2885,34 @@ def test_reduce_op_shape_2_axis_opset18(self) -> None: opset_imports=[operatorsetid], ) + def test_reduce_op_empty_set_opset13(self) -> None: + graph = self._make_graph( + [("x", TensorProto.FLOAT, (24, 0, 11))], + [make_node("ReduceL1", "x", "y", axes=(1,), keepdims=1)], + [], + initializer=[], + ) + operatorsetid = OperatorSetIdProto(domain="", version=13) + self._assert_inferred( + graph, + [make_tensor_value_info("y", TensorProto.FLOAT, (24, 1, 11))], + opset_imports=[operatorsetid], + ) + + def test_reduce_op_empty_set_opset18(self) -> None: + graph = self._make_graph( + [("x", TensorProto.FLOAT, (24, 0, 11)), ("axes", TensorProto.INT64, (1,))], + [make_node("ReduceL1", ["x", "axes"], "y", keepdims=1)], + [], + initializer=[make_tensor("axes", TensorProto.INT64, (1,), (1,))], + ) + operatorsetid = OperatorSetIdProto(domain="", version=18) + self._assert_inferred( + graph, + [make_tensor_value_info("y", TensorProto.FLOAT, (24, 1, 11))], + opset_imports=[operatorsetid], + ) + def test_reduce_op_shape_keep_dims_opset13(self) -> None: graph = self._make_graph( [("x", TensorProto.FLOAT, (24, 4, 11))], diff --git a/onnx/test/test_backend_onnxruntime.py b/onnx/test/test_backend_onnxruntime.py index c811ba938bd..64686cde07f 100644 --- a/onnx/test/test_backend_onnxruntime.py +++ b/onnx/test/test_backend_onnxruntime.py @@ -205,6 +205,22 @@ def run_node(cls, node, inputs, device=None, outputs_info=None, **kwargs): ")" ) +# The following tests fail due to a bug in onnxruntime in handling reduction +# ops that perform reduction over an empty set of values.
+backend_test.exclude( + "(" + "test_reduce_sum_empty_set" + "|test_reduce_prod_empty_set" + "|test_reduce_min_empty_set" + "|test_reduce_max_empty_set" + "|test_reduce_sum_square_empty_set" + "|test_reduce_log_sum_empty_set" + "|test_reduce_log_sum_exp_empty_set" + "|test_reduce_l1_empty_set" + "|test_reduce_l2_empty_set" + ")" +) + # The following tests fail for no obvious reason. backend_test.exclude( "("