diff --git a/.github/workflows/manylinux/entrypoint.sh b/.github/workflows/manylinux/entrypoint.sh index 03e27ede1ae..e3bf62b47e6 100644 --- a/.github/workflows/manylinux/entrypoint.sh +++ b/.github/workflows/manylinux/entrypoint.sh @@ -59,5 +59,5 @@ fi # Remove useless *-linux*.whl; only keep manylinux*.whl rm -f dist/*-linux*.whl -echo "Succesfully build wheels:" +echo "Successfully built wheels:" find . -type f -iname "*manylinux*.whl" diff --git a/docs/Changelog-ml.md b/docs/Changelog-ml.md index 30f5beca979..f67a6854648 100644 --- a/docs/Changelog-ml.md +++ b/docs/Changelog-ml.md @@ -408,7 +408,7 @@ This version of the operator has been available since version 1 of the 'ai.onnx.
T1 : tensor(float), tensor(double), tensor(int64), tensor(int32)
-
The input must be a tensor of a numeric type, and of of shape [N,C] or [C]. In the latter case, it will be treated as [1,C]
+
The input must be a tensor of a numeric type, and of shape [N,C] or [C]. In the latter case, it will be treated as [1,C]
T2 : tensor(string), tensor(int64)
The output will be a tensor of strings or integers.
@@ -609,7 +609,7 @@ This version of the operator has been available since version 1 of the 'ai.onnx.
T1 : tensor(float), tensor(double), tensor(int64), tensor(int32)
The input must be a tensor of a numeric type, either [C] or [N,C].
T2 : tensor(string), tensor(int64)
-
The output type will be a tensor of strings or integers, depending on which of the the classlabels_* attributes is used. Its size will match the bactch size of the input.
+
The output type will be a tensor of strings or integers, depending on which of the classlabels_* attributes is used. Its size will match the bactch size of the input.
### **ai.onnx.ml.SVMRegressor-1** @@ -777,7 +777,7 @@ This version of the operator has been available since version 1 of the 'ai.onnx.
T1 : tensor(float), tensor(double), tensor(int64), tensor(int32)
The input type must be a tensor of a numeric type.
T2 : tensor(string), tensor(int64)
-
The output type will be a tensor of strings or integers, depending on which of the the classlabels_* attributes is used.
+
The output type will be a tensor of strings or integers, depending on which of the classlabels_* attributes is used.
### **ai.onnx.ml.TreeEnsembleRegressor-1** @@ -1057,7 +1057,7 @@ This version of the operator has been available since version 3 of the 'ai.onnx.
T1 : tensor(float), tensor(double), tensor(int64), tensor(int32)
The input type must be a tensor of a numeric type.
T2 : tensor(string), tensor(int64)
-
The output type will be a tensor of strings or integers, depending on which of the the classlabels_* attributes is used.
+
The output type will be a tensor of strings or integers, depending on which of the classlabels_* attributes is used.
### **ai.onnx.ml.TreeEnsembleRegressor-3** diff --git a/docs/Changelog.md b/docs/Changelog.md index 7ff5bf11be6..0c4614d4835 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -2112,7 +2112,7 @@ This version of the operator has been available since version 1 of the default O user_defined_vals[i] = b + b; /* End user-defined code */ } - // my_local = 123; // Can't do this. my_local was defined in the the body + // my_local = 123; // Can't do this. my_local was defined in the body // These below values are live-out from the loop and therefore accessible b_out; user_defined_vals; keepgoing_out; @@ -8546,7 +8546,7 @@ This version of the operator has been available since version 9 of the default O ### **MaxUnpool-9** MaxUnpool essentially computes the partial inverse of the MaxPool op. - The input information to this op is typically the the output information from a MaxPool op. The first + The input information to this op is typically the output information from a MaxPool op. The first input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output) from MaxPool. The second input tensor, I, contains the indices to the (locally maximal) elements corrsponding to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op. @@ -11513,7 +11513,7 @@ This version of the operator has been available since version 11 of the default
outputs (variadic, heterogeneous) : V
-
Values that are live-out to the enclosing scope. The return values in the `then_branch` and `else_branch` must be of the same data type. The `then_branch` and `else_branch` may produce tensors with the same element type and different shapes. If corresponding outputs from the then-branch and the else-branch have static shapes S1 and S2, then the shape of the corresponding output variable of the if-node (if present) must be compatible with both S1 and S2 as it represents the union of both possible shapes.For example, if in a model file, the the first output of `then_branch` is typed float tensor with shape [2] and the first output of `else_branch` is another float tensor with shape [3], If's first output should have (a) no shape set, or (b) a shape of rank 1 with neither `dim_value` nor `dim_param` set, or (c) a shape of rank 1 with a unique `dim_param`. In contrast, the first output cannot have the shape [2] since [2] and [3] are not compatible.
+
Values that are live-out to the enclosing scope. The return values in the `then_branch` and `else_branch` must be of the same data type. The `then_branch` and `else_branch` may produce tensors with the same element type and different shapes. If corresponding outputs from the then-branch and the else-branch have static shapes S1 and S2, then the shape of the corresponding output variable of the if-node (if present) must be compatible with both S1 and S2 as it represents the union of both possible shapes.For example, if in a model file, the first output of `then_branch` is typed float tensor with shape [2] and the first output of `else_branch` is another float tensor with shape [3], If's first output should have (a) no shape set, or (b) a shape of rank 1 with neither `dim_value` nor `dim_param` set, or (c) a shape of rank 1 with a unique `dim_param`. In contrast, the first output cannot have the shape [2] since [2] and [3] are not compatible.
#### Type Constraints @@ -11881,7 +11881,7 @@ This version of the operator has been available since version 11 of the default ### **MaxUnpool-11** MaxUnpool essentially computes the partial inverse of the MaxPool op. - The input information to this op is typically the the output information from a MaxPool op. The first + The input information to this op is typically the output information from a MaxPool op. The first input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output) from MaxPool. The second input tensor, I, contains the indices to the (locally maximal) elements corrsponding to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op. @@ -16114,7 +16114,7 @@ This version of the operator has been available since version 13 of the default
outputs (variadic, heterogeneous) : V
-
Values that are live-out to the enclosing scope. The return values in the `then_branch` and `else_branch` must be of the same data type. The `then_branch` and `else_branch` may produce tensors with the same element type and different shapes. If corresponding outputs from the then-branch and the else-branch have static shapes S1 and S2, then the shape of the corresponding output variable of the if-node (if present) must be compatible with both S1 and S2 as it represents the union of both possible shapes.For example, if in a model file, the the first output of `then_branch` is typed float tensor with shape [2] and the first output of `else_branch` is another float tensor with shape [3], If's first output should have (a) no shape set, or (b) a shape of rank 1 with neither `dim_value` nor `dim_param` set, or (c) a shape of rank 1 with a unique `dim_param`. In contrast, the first output cannot have the shape [2] since [2] and [3] are not compatible.
+
Values that are live-out to the enclosing scope. The return values in the `then_branch` and `else_branch` must be of the same data type. The `then_branch` and `else_branch` may produce tensors with the same element type and different shapes. If corresponding outputs from the then-branch and the else-branch have static shapes S1 and S2, then the shape of the corresponding output variable of the if-node (if present) must be compatible with both S1 and S2 as it represents the union of both possible shapes.For example, if in a model file, the first output of `then_branch` is typed float tensor with shape [2] and the first output of `else_branch` is another float tensor with shape [3], If's first output should have (a) no shape set, or (b) a shape of rank 1 with neither `dim_value` nor `dim_param` set, or (c) a shape of rank 1 with a unique `dim_param`. In contrast, the first output cannot have the shape [2] since [2] and [3] are not compatible.
#### Type Constraints @@ -20021,7 +20021,7 @@ This version of the operator has been available since version 16 of the default
outputs (variadic, heterogeneous) : V
-
Values that are live-out to the enclosing scope. The return values in the `then_branch` and `else_branch` must be of the same data type. The `then_branch` and `else_branch` may produce tensors with the same element type and different shapes. If corresponding outputs from the then-branch and the else-branch have static shapes S1 and S2, then the shape of the corresponding output variable of the if-node (if present) must be compatible with both S1 and S2 as it represents the union of both possible shapes.For example, if in a model file, the the first output of `then_branch` is typed float tensor with shape [2] and the first output of `else_branch` is another float tensor with shape [3], If's first output should have (a) no shape set, or (b) a shape of rank 1 with neither `dim_value` nor `dim_param` set, or (c) a shape of rank 1 with a unique `dim_param`. In contrast, the first output cannot have the shape [2] since [2] and [3] are not compatible.
+
Values that are live-out to the enclosing scope. The return values in the `then_branch` and `else_branch` must be of the same data type. The `then_branch` and `else_branch` may produce tensors with the same element type and different shapes. If corresponding outputs from the then-branch and the else-branch have static shapes S1 and S2, then the shape of the corresponding output variable of the if-node (if present) must be compatible with both S1 and S2 as it represents the union of both possible shapes.For example, if in a model file, the first output of `then_branch` is typed float tensor with shape [2] and the first output of `else_branch` is another float tensor with shape [3], If's first output should have (a) no shape set, or (b) a shape of rank 1 with neither `dim_value` nor `dim_param` set, or (c) a shape of rank 1 with a unique `dim_param`. In contrast, the first output cannot have the shape [2] since [2] and [3] are not compatible.
#### Type Constraints diff --git a/docs/MetadataProps.md b/docs/MetadataProps.md index 21a7c4dfd37..93313fc226f 100644 --- a/docs/MetadataProps.md +++ b/docs/MetadataProps.md @@ -14,7 +14,7 @@ The motivation of such a mechanism is to allow model authors to convey to model In the case of images there are many option for providing valid image data. However a model which consumes images was trained with a particular set of these options which must be used during inferencing. -The goal is this proposal is to provide enough metadata that the model consumer can perform their own featurization prior to running the model and provide a compatible input or retrive an output and know what its format is. +The goal of this proposal is to provide enough metadata that the model consumer can perform their own featurization prior to running the model and provide a compatible input or retrieve an output and know what its format is. ## Image Category Definition diff --git a/docs/Operators-ml.md b/docs/Operators-ml.md index 9c3f4139657..25898414f7b 100644 --- a/docs/Operators-ml.md +++ b/docs/Operators-ml.md @@ -459,7 +459,7 @@ This version of the operator has been available since version 1 of the 'ai.onnx.
T1 : tensor(float), tensor(double), tensor(int64), tensor(int32)
-
The input must be a tensor of a numeric type, and of of shape [N,C] or [C]. In the latter case, it will be treated as [1,C]
+
The input must be a tensor of a numeric type, and of shape [N,C] or [C]. In the latter case, it will be treated as [1,C]
T2 : tensor(string), tensor(int64)
The output will be a tensor of strings or integers.
@@ -664,7 +664,7 @@ This version of the operator has been available since version 1 of the 'ai.onnx.
T1 : tensor(float), tensor(double), tensor(int64), tensor(int32)
The input must be a tensor of a numeric type, either [C] or [N,C].
T2 : tensor(string), tensor(int64)
-
The output type will be a tensor of strings or integers, depending on which of the the classlabels_* attributes is used. Its size will match the bactch size of the input.
+
The output type will be a tensor of strings or integers, depending on which of the classlabels_* attributes is used. Its size will match the bactch size of the input.
@@ -847,7 +847,7 @@ Other versions of this operator: 1, **MaxUnpool** MaxUnpool essentially computes the partial inverse of the MaxPool op. - The input information to this op is typically the the output information from a MaxPool op. The first + The input information to this op is typically the output information from a MaxPool op. The first input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output) from MaxPool. The second input tensor, I, contains the indices to the (locally maximal) elements corrsponding to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op. diff --git a/docs/TypeDenotation.md b/docs/TypeDenotation.md index 0929b4f549f..eb06a2a2641 100644 --- a/docs/TypeDenotation.md +++ b/docs/TypeDenotation.md @@ -6,7 +6,7 @@ Type Denotation is used to describe semantic information around what the inputs ## Motivation -The motivation of such a mechanism can be illustrated via a simple example. In the the neural network SqueezeNet, it takes in an NCHW image input float[1,3,244,244] and produces a output float[1,1000,1,1]: +The motivation of such a mechanism can be illustrated via a simple example. In the neural network SqueezeNet, it takes in an NCHW image input float[1,3,244,244] and produces a output float[1,1000,1,1]: ``` input_in_NCHW -> data_0 -> SqueezeNet() -> output_softmaxout_1 diff --git a/onnx/compose.py b/onnx/compose.py index 3e87230ff81..051f9b94756 100644 --- a/onnx/compose.py +++ b/onnx/compose.py @@ -287,7 +287,7 @@ def merge_models( if m1.ir_version != m2.ir_version: raise ValueError( f"IR version mismatch {m1.ir_version} != {m2.ir_version}." 
- " Both models should have have the same IR version") + " Both models should have the same IR version") ir_version = m1.ir_version opset_import_map: MutableMapping[str, int] = {} diff --git a/onnx/defs/controlflow/defs.cc b/onnx/defs/controlflow/defs.cc index c2c33afa47f..7cf267df217 100644 --- a/onnx/defs/controlflow/defs.cc +++ b/onnx/defs/controlflow/defs.cc @@ -381,7 +381,7 @@ ONNX_OPERATOR_SET_SCHEMA( "static shapes S1 and S2, then the shape of the corresponding output " "variable of the if-node (if present) must be compatible with both S1 " "and S2 as it represents the union of both possible shapes." - "For example, if in a model file, the the first " + "For example, if in a model file, the first " "output of `then_branch` is typed float tensor with shape [2] and the " "first output of `else_branch` is another float tensor with shape [3], " "If's first output should have (a) no shape set, or (b) " diff --git a/onnx/defs/controlflow/old.cc b/onnx/defs/controlflow/old.cc index 9f435ba5243..d25b417da53 100644 --- a/onnx/defs/controlflow/old.cc +++ b/onnx/defs/controlflow/old.cc @@ -679,7 +679,7 @@ C-style code: user_defined_vals[i] = b + b; /* End user-defined code */ } - // my_local = 123; // Can't do this. my_local was defined in the the body + // my_local = 123; // Can't do this. my_local was defined in the body // These below values are live-out from the loop and therefore accessible b_out; user_defined_vals; keepgoing_out; @@ -1409,7 +1409,7 @@ ONNX_OPERATOR_SET_SCHEMA( "static shapes S1 and S2, then the shape of the corresponding output " "variable of the if-node (if present) must be compatible with both S1 " "and S2 as it represents the union of both possible shapes." 
- "For example, if in a model file, the the first " + "For example, if in a model file, the first " "output of `then_branch` is typed float tensor with shape [2] and the " "first output of `else_branch` is another float tensor with shape [3], " "If's first output should have (a) no shape set, or (b) " @@ -1501,7 +1501,7 @@ ONNX_OPERATOR_SET_SCHEMA( "static shapes S1 and S2, then the shape of the corresponding output " "variable of the if-node (if present) must be compatible with both S1 " "and S2 as it represents the union of both possible shapes." - "For example, if in a model file, the the first " + "For example, if in a model file, the first " "output of `then_branch` is typed float tensor with shape [2] and the " "first output of `else_branch` is another float tensor with shape [3], " "If's first output should have (a) no shape set, or (b) " diff --git a/onnx/defs/nn/defs.cc b/onnx/defs/nn/defs.cc index b76a0b47f18..0857da0392c 100644 --- a/onnx/defs/nn/defs.cc +++ b/onnx/defs/nn/defs.cc @@ -52,7 +52,7 @@ void convPoolShapeInference( auto input_shape = ctx.getInputType(input1Idx)->tensor_type().shape(); if (input_shape.dim_size() < 2) { - fail_shape_inference("Input tensor must have atleast 2 dimensions"); + fail_shape_inference("Input tensor must have at least 2 dimensions"); } // first dim is the batch axis and the next is the number of channels. @@ -371,7 +371,7 @@ void maxUnpoolShapeInference(InferenceContext& ctx) { } auto input_shape = ctx.getInputType(0)->tensor_type().shape(); if (input_shape.dim_size() < 2) { - fail_shape_inference("Input tensor X must have atleast 2 dimensions."); + fail_shape_inference("Input tensor X must have at least 2 dimensions."); } // first dim is the batch axis and the next is the number of channels. @@ -446,7 +446,7 @@ void maxUnpoolShapeInference(InferenceContext& ctx) { static const char* MaxUnpool_ver9_doc = R"DOC( MaxUnpool essentially computes the partial inverse of the MaxPool op. 
- The input information to this op is typically the the output information from a MaxPool op. The first + The input information to this op is typically the output information from a MaxPool op. The first input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output) from MaxPool. The second input tensor, I, contains the indices to the (locally maximal) elements corrsponding to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op. diff --git a/onnx/defs/nn/old.cc b/onnx/defs/nn/old.cc index 58f503b4592..725278ec60c 100644 --- a/onnx/defs/nn/old.cc +++ b/onnx/defs/nn/old.cc @@ -256,7 +256,7 @@ void convPoolShapeInference1( auto input_shape = ctx.getInputType(input1Idx)->tensor_type().shape(); if (input_shape.dim_size() < 2) { - fail_shape_inference("Input tensor must have atleast 2 dimensions"); + fail_shape_inference("Input tensor must have at least 2 dimensions"); } // first dim is the batch axis and the next is the number of channels. @@ -720,7 +720,7 @@ void maxUnpoolShapeInference1(InferenceContext& ctx) { } auto input_shape = ctx.getInputType(0)->tensor_type().shape(); if (input_shape.dim_size() < 2) { - fail_shape_inference("Input tensor X must have atleast 2 dimensions."); + fail_shape_inference("Input tensor X must have at least 2 dimensions."); } // first dim is the batch axis and the next is the number of channels. @@ -795,7 +795,7 @@ void maxUnpoolShapeInference1(InferenceContext& ctx) { static const char* MaxUnpool_ver9_doc = R"DOC( MaxUnpool essentially computes the partial inverse of the MaxPool op. - The input information to this op is typically the the output information from a MaxPool op. The first + The input information to this op is typically the output information from a MaxPool op. The first input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output) from MaxPool. 
The second input tensor, I, contains the indices to the (locally maximal) elements corrsponding to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op. diff --git a/onnx/defs/schema.h b/onnx/defs/schema.h index d97af63aa23..d1cae546525 100644 --- a/onnx/defs/schema.h +++ b/onnx/defs/schema.h @@ -1153,7 +1153,7 @@ OpSchema GetOpSchema(); // based on a convention using name, domain, and version. Operator schema are // normally included in operator sets and registered in OpSchemaRegistry::map(). // In this case, callers should set dbg_included_in_static_opset to true. This -// assists with runtime validation in in DEBUG builds ensuring the intended set +// assists with runtime validation in DEBUG builds ensuring the intended set // of operator schema is registered. #define ONNX_OPERATOR_SET_SCHEMA_EX(name, domain, domain_str, ver, dbg_included_in_static_opset, impl) \ class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(domain, ver, name); \ diff --git a/onnx/defs/tensor/defs.cc b/onnx/defs/tensor/defs.cc index a6cee8b91c4..78c976c4d96 100644 --- a/onnx/defs/tensor/defs.cc +++ b/onnx/defs/tensor/defs.cc @@ -2070,7 +2070,7 @@ ONNX_OPERATOR_SET_SCHEMA( propagateElemTypeFromInputToOutput(ctx, 0, 0); // Shape inference - // Needs atleast the first input to proceed + // Needs at least the first input to proceed if (!hasNInputShapes(ctx, 1)) { return; } diff --git a/onnx/defs/tensor/old.cc b/onnx/defs/tensor/old.cc index 00d072985e9..2a4b3b2e9f2 100644 --- a/onnx/defs/tensor/old.cc +++ b/onnx/defs/tensor/old.cc @@ -1723,7 +1723,7 @@ ONNX_OPERATOR_SET_SCHEMA( propagateElemTypeFromInputToOutput(ctx, 0, 0); // Shape inference - // Needs atleast the first input to proceed + // Needs at least the first input to proceed if (!hasNInputShapes(ctx, 1)) { return; } diff --git a/onnx/defs/traditionalml/defs.cc b/onnx/defs/traditionalml/defs.cc index 94bcd48424d..91f782276bf 100644 --- a/onnx/defs/traditionalml/defs.cc +++ 
b/onnx/defs/traditionalml/defs.cc @@ -378,7 +378,7 @@ ONNX_ML_OPERATOR_SET_SCHEMA( .TypeConstraint( "T1", {"tensor(float)", "tensor(double)", "tensor(int64)", "tensor(int32)"}, - "The input must be a tensor of a numeric type, and of of shape [N,C] or [C]. In the latter case, it will be treated as [1,C]") + "The input must be a tensor of a numeric type, and of shape [N,C] or [C]. In the latter case, it will be treated as [1,C]") .TypeConstraint( "T2", {"tensor(string)", "tensor(int64)"}, @@ -604,7 +604,7 @@ ONNX_ML_OPERATOR_SET_SCHEMA( .TypeConstraint( "T2", {"tensor(string)", "tensor(int64)"}, - "The output type will be a tensor of strings or integers, depending on which of the the classlabels_* attributes is used. Its size will match the bactch size of the input.") + "The output type will be a tensor of strings or integers, depending on which of the classlabels_* attributes is used. Its size will match the batch size of the input.") .Attr( "kernel_type", "The kernel type, one of 'LINEAR,' 'POLY,' 'RBF,' 'SIGMOID'.", @@ -721,7 +721,7 @@ ONNX_ML_OPERATOR_SET_SCHEMA( .TypeConstraint( "T2", {"tensor(string)", "tensor(int64)"}, - "The output type will be a tensor of strings or integers, depending on which of the the classlabels_* attributes is used.") + "The output type will be a tensor of strings or integers, depending on which of the classlabels_* attributes is used.") .Attr("nodes_treeids", "Tree id for each node.", AttributeProto::INTS, OPTIONAL_VALUE) .Attr( "nodes_nodeids", diff --git a/onnx/defs/traditionalml/old.cc b/onnx/defs/traditionalml/old.cc index 84f8bf5d247..fac8813f329 100644 --- a/onnx/defs/traditionalml/old.cc +++ b/onnx/defs/traditionalml/old.cc @@ -82,7 +82,7 @@ ONNX_ML_OPERATOR_SET_SCHEMA( .TypeConstraint( "T2", {"tensor(string)", "tensor(int64)"}, - "The output type will be a tensor of strings or integers, depending on which of the the classlabels_* attributes is used.") + "The output type will be a tensor of strings or integers, depending on 
which of the classlabels_* attributes is used.") .Attr("nodes_treeids", "Tree id for each node.", AttributeProto::INTS, OPTIONAL_VALUE) .Attr( "nodes_nodeids", diff --git a/onnx/test/helper_test.py b/onnx/test/helper_test.py index c857c01a5a4..3c8b88540b1 100644 --- a/onnx/test/helper_test.py +++ b/onnx/test/helper_test.py @@ -128,11 +128,11 @@ def test_attr_sparse_tensor_proto(self) -> None: dims=[len(sparse_values)], vals=np.array(sparse_values).astype(np.float32), raw=False) - linear_indicies = [2, 3, 5] - indicies_tensor = helper.make_tensor(name='indicies', data_type=TensorProto.INT64, - dims=[len(linear_indicies)], - vals=np.array(linear_indicies).astype(np.int64), raw=False) - sparse_tensor = helper.make_sparse_tensor(values_tensor, indicies_tensor, dense_shape) + linear_indices = [2, 3, 5] + indices_tensor = helper.make_tensor(name='indices', data_type=TensorProto.INT64, + dims=[len(linear_indices)], + vals=np.array(linear_indices).astype(np.int64), raw=False) + sparse_tensor = helper.make_sparse_tensor(values_tensor, indices_tensor, dense_shape) attr = helper.make_attribute("sparse_attr", sparse_tensor) self.assertEqual(attr.name, "sparse_attr") @@ -146,11 +146,11 @@ def test_attr_sparse_tensor_repeated_protos(self) -> None: dims=[len(sparse_values)], vals=np.array(sparse_values).astype(np.float32), raw=False) - linear_indicies = [2, 3, 5] - indicies_tensor = helper.make_tensor(name='indicies', data_type=TensorProto.INT64, - dims=[len(linear_indicies)], - vals=np.array(linear_indicies).astype(np.int64), raw=False) - sparse_tensor = helper.make_sparse_tensor(values_tensor, indicies_tensor, dense_shape) + linear_indices = [2, 3, 5] + indices_tensor = helper.make_tensor(name='indices', data_type=TensorProto.INT64, + dims=[len(linear_indices)], + vals=np.array(linear_indices).astype(np.int64), raw=False) + sparse_tensor = helper.make_sparse_tensor(values_tensor, indices_tensor, dense_shape) repeated_sparse = [sparse_tensor, sparse_tensor] attr = 
helper.make_attribute("sparse_attrs", repeated_sparse) diff --git a/onnx/version_converter/adapters/scan_9_8.h b/onnx/version_converter/adapters/scan_9_8.h index c821ba75b14..646ad70b4a2 100644 --- a/onnx/version_converter/adapters/scan_9_8.h +++ b/onnx/version_converter/adapters/scan_9_8.h @@ -54,7 +54,7 @@ struct Scan_9_8 final : public Adapter { node->removeAttribute(output_axes); } - // Handling Input and Ouput Changes + // Handling Input and Output Changes node->removeAllInputs();