diff --git a/paddle/fluid/operators/elementwise/elementwise_op.h b/paddle/fluid/operators/elementwise/elementwise_op.h
index de202ecf88cac..d14f8ae65feae 100644
--- a/paddle/fluid/operators/elementwise/elementwise_op.h
+++ b/paddle/fluid/operators/elementwise/elementwise_op.h
@@ -82,7 +82,13 @@ class ElementwiseOp : public framework::OperatorWithKernel {
     auto y_dims = ctx->GetInputDim("Y");
     int max_dim = std::max(x_dims.size(), y_dims.size());
     int axis = ctx->Attrs().Get<int>("axis");
-    axis = (axis == -1 ? std::abs(x_dims.size() - y_dims.size()) : axis);
+    PADDLE_ENFORCE_EQ((axis >= (-1 * max_dim)) && (axis < max_dim), true,
+                      platform::errors::InvalidArgument(
+                          "The axis range must be [%s, %s), but axis is %s. "
+                          "Please set the axis again.",
+                          -1 * max_dim, max_dim, axis));
+    axis = (axis < 0 ? (std::abs(x_dims.size() - y_dims.size()) + axis + 1)
+                     : axis);
     std::vector<int> x_dims_array(max_dim);
     std::vector<int> y_dims_array(max_dim);
     std::vector<int> out_dims_array(max_dim);
@@ -132,8 +138,7 @@ class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker {
              "Y.dimension must be a subsequence of x.dimension. And axis "
              "is the start dimension index "
              "for broadcasting Y onto X. ")
-        .SetDefault(-1)
-        .EqualGreaterThan(-1);
+        .SetDefault(-1);
     AddAttr<bool>("use_mkldnn", "(bool, default false). Used by MKLDNN.")
         .SetDefault(false);
     AddAttr<std::string>("x_data_format", "This parameter is no longer used.")
diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py
index 05e8c9c672780..63aba8ec71ce8 100644
--- a/python/paddle/__init__.py
+++ b/python/paddle/__init__.py
@@ -134,8 +134,6 @@
 from .tensor.math import elementwise_add  #DEFINE_ALIAS
 from .tensor.math import elementwise_div  #DEFINE_ALIAS
 from .tensor.math import elementwise_floordiv  #DEFINE_ALIAS
-from .tensor.math import elementwise_max  #DEFINE_ALIAS
-from .tensor.math import elementwise_min  #DEFINE_ALIAS
 from .tensor.math import elementwise_mod  #DEFINE_ALIAS
 from .tensor.math import elementwise_pow  #DEFINE_ALIAS
 from .tensor.math import elementwise_sub  #DEFINE_ALIAS
@@ -164,7 +162,9 @@
 from .tensor.math import tanh  #DEFINE_ALIAS
 from .tensor.math import elementwise_sum  #DEFINE_ALIAS
 from .tensor.math import max  #DEFINE_ALIAS
+from .tensor.math import maximum  #DEFINE_ALIAS
 from .tensor.math import min  #DEFINE_ALIAS
+from .tensor.math import minimum  #DEFINE_ALIAS
 from .tensor.math import mm  #DEFINE_ALIAS
 from .tensor.math import div  #DEFINE_ALIAS
 from .tensor.math import multiply  #DEFINE_ALIAS
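Note on the axis change above: the old rule only special-cased `axis == -1` to mean "align Y with the trailing dimensions of X"; the new rule extends that to any negative `axis` and range-checks `[-max_dim, max_dim)`. A minimal Python sketch of the same arithmetic (the helper name `normalize_axis` is illustrative, not part of this patch):

```python
def normalize_axis(axis, x_ndim, y_ndim):
    # Mirrors the new C++ rule: a negative axis counts back from the
    # trailing dimensions, so axis == -1 reproduces the old default of
    # abs(x_ndim - y_ndim).
    max_dim = max(x_ndim, y_ndim)
    assert -max_dim <= axis < max_dim, "axis out of range"
    return abs(x_ndim - y_ndim) + axis + 1 if axis < 0 else axis

# For x.ndim == 4 and y.ndim == 2: axis=-1 -> 2 (the old default),
# and axis=-2 -> 1, which is the equivalence the new broadcast tests assert.
assert normalize_axis(-1, 4, 2) == 2
assert normalize_axis(-2, 4, 2) == 1
```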
diff --git a/python/paddle/fluid/tests/unittests/test_max_op.py b/python/paddle/fluid/tests/unittests/test_max_op.py
new file mode 100644
index 0000000000000..75ccaacc3c303
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_max_op.py
@@ -0,0 +1,69 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import numpy as np
+from op_test import OpTest, skip_check_grad_ci
+import paddle
+import paddle.fluid.core as core
+
+
+class ApiMaxTest(unittest.TestCase):
+    def setUp(self):
+        if core.is_compiled_with_cuda():
+            self.place = core.CUDAPlace(0)
+        else:
+            self.place = core.CPUPlace()
+
+    def test_api(self):
+        paddle.enable_static()
+        with paddle.static.program_guard(paddle.static.Program(),
+                                         paddle.static.Program()):
+            data = paddle.nn.data("data", shape=[10, 10], dtype="float32")
+            result_max = paddle.max(x=data, axis=1)
+            exe = paddle.static.Executor(self.place)
+            input_data = np.random.rand(10, 10).astype(np.float32)
+            res, = exe.run(feed={"data": input_data}, fetch_list=[result_max])
+            self.assertEqual((res == np.max(input_data, axis=1)).all(), True)
+
+        with paddle.static.program_guard(paddle.static.Program(),
+                                         paddle.static.Program()):
+            data = paddle.nn.data("data", shape=[10, 10], dtype="int64")
+            result_max = paddle.max(x=data, axis=0)
+            exe = paddle.static.Executor(self.place)
+            input_data = np.random.randint(10, size=(10, 10)).astype(np.int64)
+            res, = exe.run(feed={"data": input_data}, fetch_list=[result_max])
+            self.assertEqual((res == np.max(input_data, axis=0)).all(), True)
+
+    def test_errors(self):
+        paddle.enable_static()
+
+        def test_input_type():
+            with paddle.static.program_guard(paddle.static.Program(),
+                                             paddle.static.Program()):
+                data = np.random.rand(10, 10)
+                result_max = paddle.max(x=data, axis=0)
+
+        self.assertRaises(TypeError, test_input_type)
+
+    def test_imperative_api(self):
+        paddle.disable_static()
+        np_x = np.array([10, 10]).astype('float64')
+        x = paddle.to_variable(np_x)
+        z = paddle.max(x, axis=0)
+        np_z = z.numpy()
+        z_expected = np.array(np.max(np_x, axis=0))
+        self.assertEqual((np_z == z_expected).all(), True)
diff --git a/python/paddle/fluid/tests/unittests/test_maximum_op.py b/python/paddle/fluid/tests/unittests/test_maximum_op.py
new file mode 100644
index 0000000000000..bed2b57ec5969
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_maximum_op.py
@@ -0,0 +1,79 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import numpy as np
+from op_test import OpTest, skip_check_grad_ci
+import paddle
+import paddle.fluid.core as core
+
+
+class ApiMaximumTest(unittest.TestCase):
+    def setUp(self):
+        if core.is_compiled_with_cuda():
+            self.place = core.CUDAPlace(0)
+        else:
+            self.place = core.CPUPlace()
+
+        self.input_x = np.random.rand(10, 15).astype("float32")
+        self.input_y = np.random.rand(10, 15).astype("float32")
+        self.input_z = np.random.rand(15).astype("float32")
+
+    def test_static_api(self):
+        paddle.enable_static()
+        with paddle.static.program_guard(paddle.static.Program(),
+                                         paddle.static.Program()):
+            data_x = paddle.nn.data("x", shape=[10, 15], dtype="float32")
+            data_y = paddle.nn.data("y", shape=[10, 15], dtype="float32")
+            result_max = paddle.maximum(data_x, data_y)
+            exe = paddle.static.Executor(self.place)
+            res, = exe.run(feed={"x": self.input_x,
+                                 "y": self.input_y},
+                           fetch_list=[result_max])
+            self.assertEqual(
+                (res == np.maximum(self.input_x, self.input_y)).all(), True)
+
+        with paddle.static.program_guard(paddle.static.Program(),
+                                         paddle.static.Program()):
+            data_x = paddle.nn.data("x", shape=[10, 15], dtype="float32")
+            data_z = paddle.nn.data("z", shape=[15], dtype="float32")
+            result_max = paddle.maximum(data_x, data_z, axis=1)
+            exe = paddle.static.Executor(self.place)
+            res, = exe.run(feed={"x": self.input_x,
+                                 "z": self.input_z},
+                           fetch_list=[result_max])
+            self.assertEqual(
+                (res == np.maximum(self.input_x, self.input_z)).all(), True)
+
+    def test_dynamic_api(self):
+        paddle.disable_static()
+        x = paddle.to_variable(self.input_x)
+        y = paddle.to_variable(self.input_y)
+        z = paddle.maximum(x, y)
+        np_z = z.numpy()
+        z_expected = np.array(np.maximum(self.input_x, self.input_y))
+        self.assertEqual((np_z == z_expected).all(), True)
+
+    def test_broadcast_axis(self):
+        paddle.disable_static()
+        np_x = np.random.rand(5, 4, 3, 2).astype("float64")
+        np_y = np.random.rand(4, 3).astype("float64")
+
+        x = paddle.to_variable(np_x)
+        y = paddle.to_variable(np_y)
+        result_1 = paddle.maximum(x, y, axis=1)
+        result_2 = paddle.maximum(x, y, axis=-2)
+        self.assertEqual((result_1.numpy() == result_2.numpy()).all(), True)
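The broadcast test above leans on the axis semantics fixed in elementwise_op.h: with `x` of shape (5, 4, 3, 2) and `y` of shape (4, 3), `axis=1` and `axis=-2` both align `y` with dims 1..2 of `x`. Assuming NumPy as a reference, the same result can be spelled with an explicit reshape:

```python
import numpy as np

x = np.random.rand(5, 4, 3, 2)
y = np.random.rand(4, 3)

# paddle.maximum(x, y, axis=1) broadcasts y as shape (1, 4, 3, 1);
# axis=-2 normalizes to the same start dimension (see the C++ change above).
ref = np.maximum(x, y.reshape(1, 4, 3, 1))
assert ref.shape == (5, 4, 3, 2)
```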
diff --git a/python/paddle/fluid/tests/unittests/test_min_op.py b/python/paddle/fluid/tests/unittests/test_min_op.py
new file mode 100644
index 0000000000000..3dbda66e2a2cf
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_min_op.py
@@ -0,0 +1,69 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import numpy as np
+from op_test import OpTest, skip_check_grad_ci
+import paddle
+import paddle.fluid.core as core
+
+
+class ApiMinTest(unittest.TestCase):
+    def setUp(self):
+        if core.is_compiled_with_cuda():
+            self.place = core.CUDAPlace(0)
+        else:
+            self.place = core.CPUPlace()
+
+    def test_api(self):
+        paddle.enable_static()
+        with paddle.static.program_guard(paddle.static.Program(),
+                                         paddle.static.Program()):
+            data = paddle.nn.data("data", shape=[10, 10], dtype="float32")
+            result_min = paddle.min(x=data, axis=1)
+            exe = paddle.static.Executor(self.place)
+            input_data = np.random.rand(10, 10).astype(np.float32)
+            res, = exe.run(feed={"data": input_data}, fetch_list=[result_min])
+            self.assertEqual((res == np.min(input_data, axis=1)).all(), True)
+
+        with paddle.static.program_guard(paddle.static.Program(),
+                                         paddle.static.Program()):
+            data = paddle.nn.data("data", shape=[10, 10], dtype="int64")
+            result_min = paddle.min(x=data, axis=0)
+            exe = paddle.static.Executor(self.place)
+            input_data = np.random.randint(10, size=(10, 10)).astype(np.int64)
+            res, = exe.run(feed={"data": input_data}, fetch_list=[result_min])
+            self.assertEqual((res == np.min(input_data, axis=0)).all(), True)
+
+    def test_errors(self):
+        paddle.enable_static()
+
+        def test_input_type():
+            with paddle.static.program_guard(paddle.static.Program(),
+                                             paddle.static.Program()):
+                data = np.random.rand(10, 10)
+                result_min = paddle.min(x=data, axis=0)
+
+        self.assertRaises(TypeError, test_input_type)
+
+    def test_imperative_api(self):
+        paddle.disable_static()
+        np_x = np.array([10, 10]).astype('float64')
+        x = paddle.to_variable(np_x)
+        z = paddle.min(x, axis=0)
+        np_z = z.numpy()
+        z_expected = np.array(np.min(np_x, axis=0))
+        self.assertEqual((np_z == z_expected).all(), True)
diff --git a/python/paddle/fluid/tests/unittests/test_minimum_op.py b/python/paddle/fluid/tests/unittests/test_minimum_op.py
new file mode 100644
index 0000000000000..550580407acf2
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_minimum_op.py
@@ -0,0 +1,79 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import numpy as np
+from op_test import OpTest, skip_check_grad_ci
+import paddle
+import paddle.fluid.core as core
+
+
+class ApiMinimumTest(unittest.TestCase):
+    def setUp(self):
+        if core.is_compiled_with_cuda():
+            self.place = core.CUDAPlace(0)
+        else:
+            self.place = core.CPUPlace()
+
+        self.input_x = np.random.rand(10, 15).astype("float32")
+        self.input_y = np.random.rand(10, 15).astype("float32")
+        self.input_z = np.random.rand(15).astype("float32")
+
+    def test_static_api(self):
+        paddle.enable_static()
+        with paddle.static.program_guard(paddle.static.Program(),
+                                         paddle.static.Program()):
+            data_x = paddle.nn.data("x", shape=[10, 15], dtype="float32")
+            data_y = paddle.nn.data("y", shape=[10, 15], dtype="float32")
+            result_min = paddle.minimum(data_x, data_y)
+            exe = paddle.static.Executor(self.place)
+            res, = exe.run(feed={"x": self.input_x,
+                                 "y": self.input_y},
+                           fetch_list=[result_min])
+            self.assertEqual(
+                (res == np.minimum(self.input_x, self.input_y)).all(), True)
+
+        with paddle.static.program_guard(paddle.static.Program(),
+                                         paddle.static.Program()):
+            data_x = paddle.nn.data("x", shape=[10, 15], dtype="float32")
+            data_z = paddle.nn.data("z", shape=[15], dtype="float32")
+            result_min = paddle.minimum(data_x, data_z, axis=1)
+            exe = paddle.static.Executor(self.place)
+            res, = exe.run(feed={"x": self.input_x,
+                                 "z": self.input_z},
+                           fetch_list=[result_min])
+            self.assertEqual(
+                (res == np.minimum(self.input_x, self.input_z)).all(), True)
+
+    def test_dynamic_api(self):
+        paddle.disable_static()
+        x = paddle.to_variable(self.input_x)
+        y = paddle.to_variable(self.input_y)
+        z = paddle.minimum(x, y)
+        np_z = z.numpy()
+        z_expected = np.array(np.minimum(self.input_x, self.input_y))
+        self.assertEqual((np_z == z_expected).all(), True)
+
+    def test_broadcast_axis(self):
+        paddle.disable_static()
+        np_x = np.random.rand(5, 4, 3, 2).astype("float64")
+        np_y = np.random.rand(4, 3).astype("float64")
+
+        x = paddle.to_variable(np_x)
+        y = paddle.to_variable(np_y)
+        result_1 = paddle.minimum(x, y, axis=1)
+        result_2 = paddle.minimum(x, y, axis=-2)
+        self.assertEqual((result_1.numpy() == result_2.numpy()).all(), True)
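One behavior worth pinning down for both ops (the new docstrings further down show it in their examples): NaN propagates through the comparison, matching `np.maximum`/`np.minimum`. A sketch of such a check, assuming NumPy-like NaN semantics:

```python
import numpy as np
import paddle

paddle.disable_static()
x = paddle.to_variable(np.array([2., 3., 5.], dtype=np.float32))
y = paddle.to_variable(np.array([1., 4., np.nan], dtype=np.float32))

res = paddle.minimum(x, y).numpy()  # expected: [1., 3., nan]
# NaN != NaN, so the last slot must be checked with np.isnan
assert res[0] == 1. and res[1] == 3. and np.isnan(res[2])
```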
diff --git a/python/paddle/fluid/tests/unittests/test_reduce_op.py b/python/paddle/fluid/tests/unittests/test_reduce_op.py
index 16874d80112bb..82cfe0a2423f3 100644
--- a/python/paddle/fluid/tests/unittests/test_reduce_op.py
+++ b/python/paddle/fluid/tests/unittests/test_reduce_op.py
@@ -628,69 +628,5 @@ def test_1(self):
         self.assertEqual((np_z == z_expected).all(), True)
 
 
-class API_TestMaxOp(unittest.TestCase):
-    def test_1(self):
-        # type: float
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            data = fluid.data("data", shape=[10, 10], dtype="float32")
-            result_max = paddle.max(input=data, dim=1)
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            input_data = np.random.rand(10, 10).astype(np.float32)
-            res, = exe.run(feed={"data": input_data}, fetch_list=[result_max])
-            self.assertEqual((res == np.max(input_data, axis=1)).all(), True)
-
-        # type: int
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            data = fluid.data("data", shape=[10, 10], dtype="int64")
-            result_max = paddle.max(input=data, dim=1)
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            input_data = np.random.randint(10, size=(10, 10)).astype(np.int64)
-            res, = exe.run(feed={"data": input_data}, fetch_list=[result_max])
-            self.assertEqual((res == np.max(input_data, axis=1)).all(), True)
-
-        # dygraph
-        with fluid.dygraph.guard():
-            np_x = np.array([10, 10]).astype('float64')
-            x = fluid.dygraph.to_variable(np_x)
-            z = paddle.max(x, dim=0)
-            np_z = z.numpy()
-            z_expected = np.array(np.max(np_x, axis=0))
-            self.assertEqual((np_z == z_expected).all(), True)
-
-
-class API_TestMinOp(unittest.TestCase):
-    def test_1(self):
-        # type: float
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            data = fluid.data("data", shape=[10, 10], dtype="float32")
-            result_min = paddle.min(input=data, dim=1)
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            input_data = np.random.rand(10, 10).astype(np.float32)
-            res, = exe.run(feed={"data": input_data}, fetch_list=[result_min])
-            self.assertEqual((res == np.min(input_data, axis=1)).all(), True)
-
-        # type: int
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            data = fluid.data("data", shape=[10, 10], dtype="int64")
-            result_min = paddle.min(input=data, dim=1)
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            input_data = np.random.randint(10, size=(10, 10)).astype(np.int64)
-            res, = exe.run(feed={"data": input_data}, fetch_list=[result_min])
-            self.assertEqual((res == np.min(input_data, axis=1)).all(), True)
-
-        # dygraph
-        with fluid.dygraph.guard():
-            np_x = np.array([10, 10]).astype('float64')
-            x = fluid.dygraph.to_variable(np_x)
-            z = paddle.min(x, dim=0)
-            np_z = z.numpy()
-            z_expected = np.array(np.min(np_x, axis=0))
-            self.assertEqual((np_z == z_expected).all(), True)
-
-
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py
index 21cae803716a9..a295aae5de2de 100644
--- a/python/paddle/tensor/__init__.py
+++ b/python/paddle/tensor/__init__.py
@@ -110,8 +110,6 @@
 from .math import elementwise_add  #DEFINE_ALIAS
 from .math import elementwise_div  #DEFINE_ALIAS
 from .math import elementwise_floordiv  #DEFINE_ALIAS
-from .math import elementwise_max  #DEFINE_ALIAS
-from .math import elementwise_min  #DEFINE_ALIAS
 from .math import elementwise_mod  #DEFINE_ALIAS
 from .math import elementwise_pow  #DEFINE_ALIAS
 from .math import elementwise_sub  #DEFINE_ALIAS
@@ -140,7 +138,9 @@
 from .math import tanh  #DEFINE_ALIAS
 from .math import elementwise_sum  #DEFINE_ALIAS
 from .math import max  #DEFINE_ALIAS
+from .math import maximum  #DEFINE_ALIAS
 from .math import min  #DEFINE_ALIAS
+from .math import minimum  #DEFINE_ALIAS
 from .math import mm  #DEFINE_ALIAS
 from .math import div  #DEFINE_ALIAS
 from .math import multiply  #DEFINE_ALIAS
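Import-surface consequence of the two `__init__` changes above, as a quick check (hedged: exact availability depends on the build carrying this patch; `paddle.fluid.layers.elementwise_max` itself is untouched):

```python
import paddle

# The binary ops are now exported under the new names at the top level,
# and elementwise_max / elementwise_min are no longer re-exported there.
assert hasattr(paddle, "maximum") and hasattr(paddle, "minimum")
assert not hasattr(paddle, "elementwise_max")
```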
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 60994a5165942..893b2cfde819e 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -36,8 +36,6 @@
 from ..fluid.layers import elementwise_add  #DEFINE_ALIAS
 from ..fluid.layers import elementwise_div  #DEFINE_ALIAS
 from ..fluid.layers import elementwise_floordiv  #DEFINE_ALIAS
-from ..fluid.layers import elementwise_max  #DEFINE_ALIAS
-from ..fluid.layers import elementwise_min  #DEFINE_ALIAS
 from ..fluid.layers import elementwise_mod  #DEFINE_ALIAS
 from ..fluid.layers import elementwise_mul  #DEFINE_ALIAS
 from ..fluid.layers import elementwise_pow  #DEFINE_ALIAS
@@ -78,8 +76,6 @@
     'elementwise_add',
     'elementwise_div',
     'elementwise_floordiv',
-    'elementwise_max',
-    'elementwise_min',
     'elementwise_mod',
     'elementwise_pow',
     'elementwise_sub',
@@ -109,7 +105,9 @@
     'tanh',
     'elementwise_sum',
     'max',
+    'maximum',
     'min',
+    'minimum',
     'mm',
     'div',
     'multiply',
@@ -511,13 +509,117 @@ def multiply(x, y, axis=-1, name=None):
     return _elementwise_op(LayerHelper(op_type, **locals()))
 
 
+def maximum(x, y, axis=-1, name=None):
+    """
+Examples:
+
+    .. code-block:: python
+
+        import paddle
+        import numpy as np
+
+        paddle.disable_static()
+
+        x_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
+        y_data = np.array([[5, 6], [7, 8]], dtype=np.float32)
+        x = paddle.to_variable(x_data)
+        y = paddle.to_variable(y_data)
+        res = paddle.maximum(x, y)
+        print(res.numpy())
+        #[[5. 6.]
+        # [7. 8.]]
+
+        x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32)
+        y_data = np.array([1, 2], dtype=np.float32)
+        x = paddle.to_variable(x_data)
+        y = paddle.to_variable(y_data)
+        res = paddle.maximum(x, y, axis=1)
+        print(res.numpy())
+        #[[[1. 2. 3.]
+        #  [2. 2. 3.]]]
+
+        x_data = np.array([2, 3, 5], dtype=np.float32)
+        y_data = np.array([1, 4, np.nan], dtype=np.float32)
+        x = paddle.to_variable(x_data)
+        y = paddle.to_variable(y_data)
+        res = paddle.maximum(x, y)
+        print(res.numpy())
+        #[ 2.  4. nan]
+
+        x_data = np.array([5, 3, np.inf], dtype=np.float32)
+        y_data = np.array([1, 4, 5], dtype=np.float32)
+        x = paddle.to_variable(x_data)
+        y = paddle.to_variable(y_data)
+        res = paddle.maximum(x, y)
+        print(res.numpy())
+        #[ 5.  4. inf]
+    """
+    op_type = 'elementwise_max'
+    act = None
+    if in_dygraph_mode():
+        return _elementwise_op_in_dygraph(
+            x, y, axis=axis, act=act, op_name=op_type)
+    return _elementwise_op(LayerHelper(op_type, **locals()))
+
+ """ + op_type = 'elementwise_min' + act = None + if in_dygraph_mode(): + return _elementwise_op_in_dygraph( + x, y, axis=axis, act=act, op_name=op_type) + return _elementwise_op(LayerHelper(op_type, **locals())) for func in [ add, div, - multiply, + maximum, + minimum, + multiply ]: - proto_dict = {'add': 'elementwise_add', 'div': 'elementwise_div', 'multiply': 'elementwise_mul'} + proto_dict = {'add': 'elementwise_add', 'div': 'elementwise_div', 'maximum': 'elementwise_max', 'minimum': 'elementwise_min', 'multiply': 'elementwise_mul'} op_proto = OpProtoHolder.instance().get_op_proto(proto_dict[func.__name__]) if func.__name__ in ['add']: alias_main = ':alias_main: paddle.%(func)s' % {'func': func.__name__} @@ -1065,152 +1167,179 @@ def _check_input(input): return out -def max(input, dim=None, keep_dim=False, name=None): +def max(x, axis=None, keepdim=False, name=None): """ - :alias_main: paddle.max - :alias: paddle.max,paddle.tensor.max,paddle.tensor.math.max - Computes the maximum of tensor elements over the given dimension. + Computes the maximum of tensor elements over the given axis. Args: - input (Variable): The input variable which is a Tensor, the data type is float32, + x(Tensor): A tensor, the data type is float32, float64, int32, int64. - dim (list|int, optional): The dimension along which the maximum is computed. + axis(list|int, optional): The axis along which the maximum is computed. If :attr:`None`, compute the maximum over all elements of :attr:`input` and return a Tensor variable with a single element, - otherwise must be in the range :math:`[-rank(input), rank(input))`. - If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`. - keep_dim (bool, optional): Whether to reserve the reduced dimension in the + otherwise must be in the range :math:`[-x.ndim(x), x.ndim(x))`. + If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`. + keepdim(bool, optional): Whether to reserve the reduced dimension in the output Tensor. The result tensor will have one fewer dimension - than the :attr:`input` unless :attr:`keep_dim` is true, default + than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: - Variable: Tensor, results of maximum on the specified dim of input tensor, + Tensor, results of maximum on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python + + import numpy as np import paddle - import paddle.fluid as fluid - # x is a Tensor variable with following elements: - # [[0.2, 0.3, 0.5, 0.9] - # [0.1, 0.2, 0.6, 0.7]] - # Each example is followed by the corresponding output tensor. - x = fluid.data(name='x', shape=[2, 4], dtype='float32') - paddle.max(x) # [0.9] - paddle.max(x, dim=0) # [0.2, 0.3, 0.6, 0.9] - paddle.max(x, dim=-1) # [0.9, 0.7] - paddle.max(x, dim=1, keep_dim=True) # [[0.9], [0.7]] - # y is a Tensor variable with shape [2, 2, 2] and elements as below: - # [[[1.0, 2.0], [3.0, 4.0]], - # [[5.0, 6.0], [7.0, 8.0]]] - # Each example is followed by the corresponding output tensor. 
@@ -1065,152 +1167,179 @@ def _check_input(input):
     return out
 
 
-def max(input, dim=None, keep_dim=False, name=None):
+def max(x, axis=None, keepdim=False, name=None):
     """
-    :alias_main: paddle.max
-    :alias: paddle.max,paddle.tensor.max,paddle.tensor.math.max
-
-    Computes the maximum of tensor elements over the given dimension.
+    Computes the maximum of tensor elements over the given axis.
 
     Args:
-        input (Variable): The input variable which is a Tensor, the data type is float32,
-            float64, int32, int64.
-        dim (list|int, optional): The dimension along which the maximum is computed.
+        x(Tensor): A tensor, the data type is float32, float64, int32, int64.
+        axis(list|int, optional): The axis along which the maximum is computed.
             If :attr:`None`, compute the maximum over all elements of
             :attr:`input` and return a Tensor variable with a single element,
-            otherwise must be in the range :math:`[-rank(input), rank(input))`.
-            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
-        keep_dim (bool, optional): Whether to reserve the reduced dimension in the
+            otherwise must be in the range :math:`[-x.ndim, x.ndim)`.
+            If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.
+        keepdim(bool, optional): Whether to reserve the reduced dimension in the
             output Tensor. The result tensor will have one fewer dimension
-            than the :attr:`input` unless :attr:`keep_dim` is true, default
+            than the :attr:`input` unless :attr:`keepdim` is true, default
             value is False.
         name(str, optional): The default value is None. Normally there is no need for
             user to set this property. For more information, please refer to :ref:`api_guide_Name`
 
     Returns:
-        Variable: Tensor, results of maximum on the specified dim of input tensor,
+        Tensor, results of maximum on the specified axis of input tensor,
         it's data type is the same as input's Tensor.
 
     Examples:
         .. code-block:: python
+
+            import numpy as np
             import paddle
-            import paddle.fluid as fluid
 
-            # x is a Tensor variable with following elements:
-            #    [[0.2, 0.3, 0.5, 0.9]
-            #     [0.1, 0.2, 0.6, 0.7]]
-            # Each example is followed by the corresponding output tensor.
-            x = fluid.data(name='x', shape=[2, 4], dtype='float32')
-            paddle.max(x)  # [0.9]
-            paddle.max(x, dim=0)  # [0.2, 0.3, 0.6, 0.9]
-            paddle.max(x, dim=-1)  # [0.9, 0.7]
-            paddle.max(x, dim=1, keep_dim=True)  # [[0.9], [0.7]]
-
-            # y is a Tensor variable with shape [2, 2, 2] and elements as below:
-            #      [[[1.0, 2.0], [3.0, 4.0]],
-            #       [[5.0, 6.0], [7.0, 8.0]]]
-            # Each example is followed by the corresponding output tensor.
-            y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
-            paddle.max(y, dim=[1, 2])  # [4.0, 8.0]
-            paddle.max(y, dim=[0, 1])  # [7.0, 8.0]
+            paddle.disable_static()
+
+            # data_x is a variable with shape [2, 4]
+            # the axis is an int element
+            data_x = np.array([[0.2, 0.3, 0.5, 0.9],
+                               [0.1, 0.2, 0.6, 0.7]])
+            x = paddle.to_variable(data_x)
+            result1 = paddle.max(x)
+            print(result1.numpy())
+            #[0.9]
+            result2 = paddle.max(x, axis=0)
+            print(result2.numpy())
+            #[0.2 0.3 0.6 0.9]
+            result3 = paddle.max(x, axis=-1)
+            print(result3.numpy())
+            #[0.9 0.7]
+            result4 = paddle.max(x, axis=1, keepdim=True)
+            print(result4.numpy())
+            #[[0.9]
+            # [0.7]]
+
+            # data_y is a variable with shape [2, 2, 2]
+            # the axis is a list
+            data_y = np.array([[[1.0, 2.0], [3.0, 4.0]],
+                               [[5.0, 6.0], [7.0, 8.0]]])
+            y = paddle.to_variable(data_y)
+            result5 = paddle.max(y, axis=[1, 2])
+            print(result5.numpy())
+            #[4. 8.]
+            result6 = paddle.max(y, axis=[0, 1])
+            print(result6.numpy())
+            #[7. 8.]
     """
-    helper = LayerHelper('max', **locals())
-    out = helper.create_variable_for_type_inference(
-        dtype=helper.input_dtype())
-    if dim is not None and not isinstance(dim, list):
-        dim = [dim]
+    if axis is not None and not isinstance(axis, list):
+        axis = [axis]
+    reduce_all = True if axis == None or axis == [] else False
+    axis = axis if axis != None and axis != [] else [0]
+    if in_dygraph_mode():
+        return core.ops.reduce_max(x, 'dim', axis, 'keep_dim', keepdim,
+                                   'reduce_all', reduce_all)
 
+    helper = LayerHelper('max', **locals())
     check_variable_and_dtype(
-        input, 'input', ['float32', 'float64', 'int32', 'int64'], 'max')
-
-    reduce_all = True if dim == None or dim == [] else False
-    dim = dim if dim != None and dim != [] else [0]
+        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'max')
 
-    if in_dygraph_mode():
-        return core.ops.reduce_max(input, 'dim', dim, 'keep_dim', keep_dim,
-                                   'reduce_all', reduce_all)
+    out = helper.create_variable_for_type_inference(
+        dtype=helper.input_dtype())
     helper.append_op(
         type='reduce_max',
-        inputs={'X': input},
+        inputs={'X': x},
         outputs={'Out': out},
         attrs={
-            'dim': dim,
-            'keep_dim': keep_dim,
+            'dim': axis,
+            'keep_dim': keepdim,
             'reduce_all': reduce_all
         })
     return out
 
-
-def min(input, dim=None, keep_dim=False, name=None):
+def min(x, axis=None, keepdim=False, name=None):
     """
-    :alias_main: paddle.min
-    :alias: paddle.min,paddle.tensor.min,paddle.tensor.math.min
-
-    Computes the minimum of tensor elements over the given dimension.
+    Computes the minimum of tensor elements over the given axis.
 
     Args:
-        input (Variable): The input variable which is a Tensor, the data type is float32,
-            float64, int32, int64.
-        dim (list|int, optional): The dimensions along which the minimum is computed.
+        x(Tensor): A tensor, the data type is float32, float64, int32, int64.
+        axis(list|int, optional): The axis along which the minimum is computed.
             If :attr:`None`, compute the minimum over all elements of
             :attr:`input` and return a Tensor variable with a single element,
-            otherwise must be in the range :math:`[-rank(input), rank(input))`.
-            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
-        keep_dim (bool, optional): Whether to reserve the reduced dimension in the
+            otherwise must be in the range :math:`[-x.ndim, x.ndim)`.
+            If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.
+        keepdim(bool, optional): Whether to reserve the reduced dimension in the
             output Tensor. The result tensor will have one fewer dimension
-            than the :attr:`input` unless :attr:`keep_dim` is true, default
+            than the :attr:`input` unless :attr:`keepdim` is true, default
             value is False.
         name(str, optional): The default value is None. Normally there is no need for
             user to set this property. For more information, please refer to :ref:`api_guide_Name`
 
     Returns:
-        Variable: Tensor, result of minimum on the specified dim of input tensor,
+        Tensor, results of minimum on the specified axis of input tensor,
         it's data type is the same as input's Tensor.
 
     Examples:
         .. code-block:: python
 
+            import numpy as np
             import paddle
-            import paddle.fluid as fluid
 
-            # x is a Tensor variable with following elements:
-            #    [[0.2, 0.3, 0.5, 0.9]
-            #     [0.1, 0.2, 0.6, 0.7]]
-            # Each example is followed by the corresponding output tensor.
-            x = fluid.data(name='x', shape=[2, 4], dtype='float32')
-            paddle.min(x)  # [0.1]
-            paddle.min(x, dim=0)  # [0.1, 0.2, 0.5, 0.7]
-            paddle.min(x, dim=-1)  # [0.2, 0.1]
-            paddle.min(x, dim=1, keep_dim=True)  # [[0.2], [0.1]]
-
-            # y is a Tensor variable with shape [2, 2, 2] and elements as below:
-            #      [[[1.0, 2.0], [3.0, 4.0]],
-            #       [[5.0, 6.0], [7.0, 8.0]]]
-            # Each example is followed by the corresponding output tensor.
-            y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
-            paddle.min(y, dim=[1, 2])  # [1.0, 5.0]
-            paddle.min(y, dim=[0, 1])  # [1.0, 2.0]
+            paddle.disable_static()
+
+            # data_x is a variable with shape [2, 4]
+            # the axis is an int element
+            data_x = np.array([[0.2, 0.3, 0.5, 0.9],
+                               [0.1, 0.2, 0.6, 0.7]])
+            x = paddle.to_variable(data_x)
+            result1 = paddle.min(x)
+            print(result1.numpy())
+            #[0.1]
+            result2 = paddle.min(x, axis=0)
+            print(result2.numpy())
+            #[0.1 0.2 0.5 0.7]
+            result3 = paddle.min(x, axis=-1)
+            print(result3.numpy())
+            #[0.2 0.1]
+            result4 = paddle.min(x, axis=1, keepdim=True)
+            print(result4.numpy())
+            #[[0.2]
+            # [0.1]]
+
+            # data_y is a variable with shape [2, 2, 2]
+            # the axis is a list
+            data_y = np.array([[[1.0, 2.0], [3.0, 4.0]],
+                               [[5.0, 6.0], [7.0, 8.0]]])
+            y = paddle.to_variable(data_y)
+            result5 = paddle.min(y, axis=[1, 2])
+            print(result5.numpy())
+            #[1. 5.]
+            result6 = paddle.min(y, axis=[0, 1])
+            print(result6.numpy())
+            #[1. 2.]
     """
-    helper = LayerHelper('min', **locals())
-    out = helper.create_variable_for_type_inference(
-        dtype=helper.input_dtype())
-    if dim is not None and not isinstance(dim, list):
-        dim = [dim]
-
-    check_variable_and_dtype(
-        input, 'input', ['float32', 'float64', 'int32', 'int64'], 'max')
-
-    reduce_all = True if dim == None or dim == [] else False
-    dim = dim if dim != None and dim != [] else [0]
-
+    if axis is not None and not isinstance(axis, list):
+        axis = [axis]
+    reduce_all = True if axis == None or axis == [] else False
+    axis = axis if axis != None and axis != [] else [0]
     if in_dygraph_mode():
-        return core.ops.reduce_min(input, 'dim', dim, 'keep_dim', keep_dim,
+        return core.ops.reduce_min(x, 'dim', axis, 'keep_dim', keepdim,
                                    'reduce_all', reduce_all)
+
+    helper = LayerHelper('min', **locals())
+    check_variable_and_dtype(
+        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'min')
+
+    out = helper.create_variable_for_type_inference(
+        dtype=helper.input_dtype())
     helper.append_op(
         type='reduce_min',
-        inputs={'X': input},
+        inputs={'X': x},
         outputs={'Out': out},
         attrs={
-            'dim': dim,
-            'keep_dim': keep_dim,
+            'dim': axis,
+            'keep_dim': keepdim,
             'reduce_all': reduce_all
         })
     return out
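For reference, the axis pre-processing shared by `max` and `min` above boils down to a few lines; a standalone sketch (the helper name `canonicalize_reduce_axis` is illustrative, not part of this patch):

```python
def canonicalize_reduce_axis(axis):
    # Mirrors the body of max()/min() above: a bare int becomes a
    # one-element list; None or [] means "reduce over all elements",
    # signalled to reduce_max/reduce_min via reduce_all=True (the [0]
    # placeholder axis is ignored in that case).
    if axis is not None and not isinstance(axis, list):
        axis = [axis]
    reduce_all = axis is None or axis == []
    return ([0] if reduce_all else axis), reduce_all

assert canonicalize_reduce_axis(None) == ([0], True)
assert canonicalize_reduce_axis(1) == ([1], False)
assert canonicalize_reduce_axis([1, 2]) == ([1, 2], False)
```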