From 9be14c0a3a2d9f6fe6e04370707ff32932b47c26 Mon Sep 17 00:00:00 2001 From: Gary Miguel Date: Tue, 7 Jun 2022 08:40:48 -0700 Subject: [PATCH] Use op name rather than hard-coding Hann in window op doc strings. (#4248) And strip trailing spaces. Signed-off-by: Gary Miguel Signed-off-by: Chun-Wei Chen --- docs/Changelog.md | 4 ++-- docs/Operators.md | 4 ++-- onnx/defs/math/defs.cc | 33 +++++++++++++-------------------- 3 files changed, 17 insertions(+), 24 deletions(-) diff --git a/docs/Changelog.md b/docs/Changelog.md index 7ff5bf11be6..0237a2de65d 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -20845,7 +20845,7 @@ This version of the operator has been available since version 17 of the default
output (non-differentiable) : T2
-
A Hann window with length: size. The output has the shape: [size].
+
A Blackman window with length: size. The output has the shape: [size].
#### Type Constraints @@ -20929,7 +20929,7 @@ This version of the operator has been available since version 17 of the default
output (non-differentiable) : T2
-
A Hann window with length: size. The output has the shape: [size].
+
A Hamming window with length: size. The output has the shape: [size].
#### Type Constraints diff --git a/docs/Operators.md b/docs/Operators.md index aec6f83c482..bfdfb320546 100644 --- a/docs/Operators.md +++ b/docs/Operators.md @@ -2417,7 +2417,7 @@ This version of the operator has been available since version 17 of the default
output (non-differentiable) : T2
-
A Hann window with length: size. The output has the shape: [size].
+
A Blackman window with length: size. The output has the shape: [size].
#### Type Constraints @@ -8232,7 +8232,7 @@ This version of the operator has been available since version 17 of the default
output (non-differentiable) : T2
-
A Hann window with length: size. The output has the shape: [size].
+
A Hamming window with length: size. The output has the shape: [size].
#### Type Constraints diff --git a/onnx/defs/math/defs.cc index ee96e627da9..b322037b6a8 100644 --- a/onnx/defs/math/defs.cc +++ b/onnx/defs/math/defs.cc @@ -757,7 +757,7 @@ ONNX_OPERATOR_SET_SCHEMA( .TypeAndShapeInferenceFunction(propagateShapeAndTypeFromFirstInput) .FunctionBody(R"ONNX( { - HS_X = HardSigmoid(X) + HS_X = HardSigmoid(X) Y = Mul (X, HS_X) } )ONNX")); @@ -1708,14 +1708,14 @@ ONNX_OPERATOR_SET_SCHEMA( static const char* QLinearMatMul_ver10_doc = R"DOC( Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html. -It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, -and computes the quantized output. The quantization formula is y = saturate((x / y_scale) + y_zero_point). -For (x / y_scale), it is rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. -Scale and zero point must have same shape. They must be either scalar (per tensor) or N-D tensor -(per row for 'a' and per column for 'b'). Scalar refers to per tensor quantization whereas N-D refers to per row -or per column quantization. If the input is 2D of shape [M, K] then zero point and scale tensor may be -an M element vector [v_1, v_2, ..., v_M] for per row quantization and K element vector of shape [v_1, v_2, ..., v_K] -for per column quantization. If the input is N-D tensor with shape [D1, D2, M, K] then zero point and scale tensor may +It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, +and computes the quantized output. The quantization formula is y = saturate((x / y_scale) + y_zero_point). +For (x / y_scale), it is rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. +Scale and zero point must have same shape. They must be either scalar (per tensor) or N-D tensor +(per row for 'a' and per column for 'b'). 
Scalar refers to per tensor quantization whereas N-D refers to per row +or per column quantization. If the input is 2D of shape [M, K] then zero point and scale tensor may be +an M element vector [v_1, v_2, ..., v_M] for per row quantization and K element vector of shape [v_1, v_2, ..., v_K] +for per column quantization. If the input is N-D tensor with shape [D1, D2, M, K] then zero point and scale tensor may have shape [D1, D2, M, 1] for per row quantization and shape [D1, D2, 1, K] for per column quantization. Production must never overflow, and accumulation may overflow if and only if in 32 bits. )DOC"; @@ -2858,16 +2858,9 @@ Generates a {name} window as described in the paper https://ieeexplore.ieee.org/ true, 1, OpSchema::NonDifferentiable); - schema.Output( - 0, - "output", - "A Hann window with length: size. " - "The output has the shape: [size].", - "T2", - OpSchema::Single, - true, - 1, - OpSchema::NonDifferentiable); + std::string output_doc("A {name} window with length: size. The output has the shape: [size]."); + ReplaceAll(output_doc, "{name}", name); + schema.Output(0, "output", output_doc, "T2", OpSchema::Single, true, 1, OpSchema::NonDifferentiable); schema.TypeAndShapeInferenceFunction([](ONNX_NAMESPACE::InferenceContext& ctx) { // Update the output data type to the output_datatype auto output_datatype = getAttribute(ctx, "output_datatype", static_cast(TensorProto_DataType_FLOAT)); @@ -3017,7 +3010,7 @@ ONNX_OPERATOR_SET_SCHEMA( static const char* MelWeightMatrix_ver17_doc = R"DOC( Generate a MelWeightMatrix that can be used to re-weight a Tensor containing a linearly sampled frequency spectra (from DFT or STFT) into num_mel_bins frequency information based on the [lower_edge_hertz, upper_edge_hertz] range on the mel scale. This function defines the mel scale in terms of a frequency in hertz according to the following formula: - + mel(f) = 2595 * log10(1 + f/700) In the returned matrix, all the triangles (filterbanks) have a peak value of 1.0.