diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index b481b5e65dad3..aa30d2b7a7770 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -4594,7 +4594,7 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None):
     Args:
         input (Variable): The input variable which is a Tensor, the data type is float32,
             float64, int32, int64.
-        dim (list|int, optional): The dimensions along which the product is performed. If
+        dim (int|list|tuple, optional): The dimensions along which the product is performed. If
             :attr:`None`, multiply all elements of :attr:`input` and return a
             Tensor variable with a single element, otherwise must be in the
             range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
diff --git a/python/paddle/fluid/tests/unittests/test_prod_op.py b/python/paddle/fluid/tests/unittests/test_prod_op.py
index fe9abad77dbd5..158683907253e 100644
--- a/python/paddle/fluid/tests/unittests/test_prod_op.py
+++ b/python/paddle/fluid/tests/unittests/test_prod_op.py
@@ -15,7 +15,6 @@
 from __future__ import print_function
 
 import paddle
-import paddle.fluid as fluid
 import unittest
 import numpy as np
 
@@ -34,6 +33,10 @@ def run_imperative(self):
         expected_result = np.prod(self.input, axis=1)
         self.assertTrue(np.allclose(dy_result.numpy(), expected_result))
 
+        dy_result = paddle.prod(input, axis=-1)
+        expected_result = np.prod(self.input, axis=-1)
+        self.assertTrue(np.allclose(dy_result.numpy(), expected_result))
+
         dy_result = paddle.prod(input, axis=[0, 1])
         expected_result = np.prod(self.input, axis=(0, 1))
         self.assertTrue(np.allclose(dy_result.numpy(), expected_result))
@@ -52,58 +55,64 @@ def run_imperative(self):
         self.assertTrue(np.allclose(dy_result.numpy(), expected_result))
 
     def run_static(self, use_gpu=False):
-        input = fluid.data(name='input', shape=[10, 10, 5], dtype='float32')
+        input = paddle.data(name='input', shape=[10, 10, 5], dtype='float32')
         result0 = paddle.prod(input)
         result1 = paddle.prod(input, axis=1)
-        result2 = paddle.prod(input, axis=[0, 1])
-        result3 = paddle.prod(input, axis=1, keepdim=True)
-        result4 = paddle.prod(input, axis=1, dtype='int64')
-        result5 = paddle.prod(input, axis=1, keepdim=True, dtype='int64')
-
-        place = fluid.CUDAPlace(4) if use_gpu else fluid.CPUPlace()
-        exe = fluid.Executor(place)
-        exe.run(fluid.default_startup_program())
-        static_result = exe.run(
-            feed={"input": self.input},
-            fetch_list=[result0, result1, result2, result3, result4, result5])
+        result2 = paddle.prod(input, axis=-1)
+        result3 = paddle.prod(input, axis=[0, 1])
+        result4 = paddle.prod(input, axis=1, keepdim=True)
+        result5 = paddle.prod(input, axis=1, dtype='int64')
+        result6 = paddle.prod(input, axis=1, keepdim=True, dtype='int64')
+
+        place = paddle.CUDAPlace(0) if use_gpu else paddle.CPUPlace()
+        exe = paddle.static.Executor(place)
+        exe.run(paddle.static.default_startup_program())
+        static_result = exe.run(feed={"input": self.input},
+                                fetch_list=[
+                                    result0, result1, result2, result3, result4,
+                                    result5, result6
+                                ])
 
         expected_result = np.prod(self.input)
         self.assertTrue(np.allclose(static_result[0], expected_result))
         expected_result = np.prod(self.input, axis=1)
         self.assertTrue(np.allclose(static_result[1], expected_result))
-        expected_result = np.prod(self.input, axis=(0, 1))
+        expected_result = np.prod(self.input, axis=-1)
         self.assertTrue(np.allclose(static_result[2], expected_result))
-        expected_result = np.prod(self.input, axis=1, keepdims=True)
+        expected_result = np.prod(self.input, axis=(0, 1))
         self.assertTrue(np.allclose(static_result[3], expected_result))
-        expected_result = np.prod(self.input, axis=1, dtype=np.int64)
+        expected_result = np.prod(self.input, axis=1, keepdims=True)
         self.assertTrue(np.allclose(static_result[4], expected_result))
+        expected_result = np.prod(self.input, axis=1, dtype=np.int64)
+        self.assertTrue(np.allclose(static_result[5], expected_result))
         expected_result = np.prod(
             self.input, axis=1, keepdims=True, dtype=np.int64)
-        self.assertTrue(np.allclose(static_result[5], expected_result))
+        self.assertTrue(np.allclose(static_result[6], expected_result))
 
     def test_cpu(self):
-        paddle.disable_static(place=paddle.fluid.CPUPlace())
+        paddle.disable_static(place=paddle.CPUPlace())
         self.run_imperative()
         paddle.enable_static()
 
-        with fluid.program_guard(fluid.Program()):
+        with paddle.static.program_guard(paddle.static.Program()):
             self.run_static()
 
     def test_gpu(self):
-        if not fluid.core.is_compiled_with_cuda():
+        if not paddle.fluid.core.is_compiled_with_cuda():
             return
-        paddle.disable_static(place=paddle.fluid.CUDAPlace(4))
+        paddle.disable_static(place=paddle.CUDAPlace(0))
         self.run_imperative()
         paddle.enable_static()
 
-        with fluid.program_guard(fluid.Program()):
+        with paddle.static.program_guard(paddle.static.Program()):
             self.run_static(use_gpu=True)
 
 
 class TestProdOpError(unittest.TestCase):
     def test_error(self):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
+        with paddle.static.program_guard(paddle.static.Program(),
+                                         paddle.static.Program()):
             x = paddle.data(name='x', shape=[2, 2, 4], dtype='float32')
             bool_x = paddle.data(name='bool_x', shape=[2, 2, 4], dtype='bool')
             # The argument x shoule be a Tensor
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index c3d0aeffd6876..442ca8cabdad7 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -1630,14 +1630,15 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None):
     Compute the product of tensor elements over the given axis.
 
     Args:
-        x(Tensor): Input of prod operator. The data type is float32, float64, int32, int64.
-        axis(list|int, optional): The axis along which the product is computed. If :attr:`None`,
+        x(Tensor): An N-D Tensor, the data type is float32, float64, int32 or int64.
+        axis(int|list|tuple, optional): The axis along which the product is computed. If :attr:`None`,
             multiply all elements of `x` and return a Tensor with a single element,
             otherwise must be in the range :math:`[-x.ndim, x.ndim)`. If :math:`axis[i]<0`,
             the axis to reduce is :math:`x.ndim + axis[i]`. Default is None.
-        dtype(str, optional): The desired date type of returned tensor, can be float32, float64,
+        dtype(str|np.dtype, optional): The desired data type of the returned tensor, can be float32, float64,
             int32, int64. If specified, the input tensor is casted to dtype before operator performed.
-            This is very useful for avoiding data type overflows. The default value is False.
+            This is very useful for avoiding data type overflows. The default value is None; in that
+            case the dtype of the output is the same as that of the input Tensor `x`.
         keepdim(bool, optional): Whether to reserve the reduced dimension in the output Tensor. The result
             tensor will have one fewer dimension than the input unless keep_dim is true. Default is False.
         name(string, optional): The default value is None. Normally there is no need for user to set this property.
@@ -1646,6 +1647,10 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None):
 
     Returns:
         Tensor, result of product on the specified dim of input tensor.
 
+    Raises:
+        ValueError: The :attr:`dtype` must be float32, float64, int32 or int64.
+        TypeError: The type of :attr:`axis` must be int, list or tuple.
+
     Examples:
         .. code-block:: python
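
Note for reviewers: the snippet below is a minimal, hand-written sketch (not part of the patch) of the behaviour the updated `paddle.prod` docstring and tests exercise. It assumes a Paddle build that already contains this change, uses the same [10, 10, 5] input shape as TestProdOp, and relies on the standard `paddle.to_tensor` / `paddle.disable_static` 2.0 APIs rather than anything introduced here.

    import numpy as np
    import paddle

    paddle.disable_static()  # imperative mode, as in run_imperative()
    x = paddle.to_tensor(np.random.random((10, 10, 5)).astype('float32'))

    # axis accepts an int, a negative int, or a list/tuple of ints.
    print(paddle.prod(x, axis=1).shape)        # [10, 5]
    print(paddle.prod(x, axis=-1).shape)       # [10, 10]
    print(paddle.prod(x, axis=[0, 1]).shape)   # [5]

    # keepdim keeps the reduced axis; dtype casts the input before reducing.
    print(paddle.prod(x, axis=1, keepdim=True).shape)   # [10, 1, 5]
    print(paddle.prod(x, axis=1, dtype='int64').dtype)  # result dtype is int64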