Add new API: paddle.prod
test=develop
gfwm2013 committed Aug 18, 2020
1 parent 5123131 commit fceb02c
Showing 3 changed files with 42 additions and 28 deletions.
2 changes: 1 addition & 1 deletion python/paddle/fluid/layers/nn.py
@@ -4594,7 +4594,7 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None):
Args:
input (Variable): The input variable which is a Tensor, the data type is float32,
float64, int32, int64.
dim (list|int, optional): The dimensions along which the product is performed. If
dim (int|list|tuple, optional): The dimensions along which the product is performed. If
:attr:`None`, multiply all elements of :attr:`input` and return a
Tensor variable with a single element, otherwise must be in the
range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
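For reviewers, a minimal sketch of the `dim` semantics documented above, written against the reduce_prod signature shown in the hunk header; the static-graph setup (fluid.data, fluid.Executor) is standard Fluid usage and not part of this diff, and the shapes are illustrative only.

import numpy as np
import paddle
import paddle.fluid as fluid

paddle.enable_static()  # make sure we build a static graph

# rank(x) == 2, so dim=-1 refers to the same axis as dim=1 (rank(x) + (-1)).
x = fluid.data(name='x', shape=[2, 3], dtype='float32')
prod_last = fluid.layers.reduce_prod(x, dim=-1)                     # shape: [2]
prod_all = fluid.layers.reduce_prod(x)                              # dim=None -> single element
prod_keep = fluid.layers.reduce_prod(x, dim=[0, 1], keep_dim=True)  # shape: [1, 1]

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
data = np.arange(1, 7, dtype='float32').reshape(2, 3)
last, full, keep = exe.run(feed={'x': data},
                           fetch_list=[prod_last, prod_all, prod_keep])
# last == np.prod(data, axis=-1); full == np.prod(data); keep.shape == (1, 1)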
55 changes: 32 additions & 23 deletions python/paddle/fluid/tests/unittests/test_prod_op.py
@@ -15,7 +15,6 @@
from __future__ import print_function

import paddle
import paddle.fluid as fluid
import unittest
import numpy as np

@@ -34,6 +33,10 @@ def run_imperative(self):
expected_result = np.prod(self.input, axis=1)
self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

dy_result = paddle.prod(input, axis=-1)
expected_result = np.prod(self.input, axis=-1)
self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

dy_result = paddle.prod(input, axis=[0, 1])
expected_result = np.prod(self.input, axis=(0, 1))
self.assertTrue(np.allclose(dy_result.numpy(), expected_result))
@@ -52,58 +55,64 @@ def run_imperative(self):
self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

def run_static(self, use_gpu=False):
input = fluid.data(name='input', shape=[10, 10, 5], dtype='float32')
input = paddle.data(name='input', shape=[10, 10, 5], dtype='float32')
result0 = paddle.prod(input)
result1 = paddle.prod(input, axis=1)
result2 = paddle.prod(input, axis=[0, 1])
result3 = paddle.prod(input, axis=1, keepdim=True)
result4 = paddle.prod(input, axis=1, dtype='int64')
result5 = paddle.prod(input, axis=1, keepdim=True, dtype='int64')

place = fluid.CUDAPlace(4) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
static_result = exe.run(
feed={"input": self.input},
fetch_list=[result0, result1, result2, result3, result4, result5])
result2 = paddle.prod(input, axis=-1)
result3 = paddle.prod(input, axis=[0, 1])
result4 = paddle.prod(input, axis=1, keepdim=True)
result5 = paddle.prod(input, axis=1, dtype='int64')
result6 = paddle.prod(input, axis=1, keepdim=True, dtype='int64')

place = paddle.CUDAPlace(0) if use_gpu else paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(paddle.static.default_startup_program())
static_result = exe.run(feed={"input": self.input},
fetch_list=[
result0, result1, result2, result3, result4,
result5, result6
])

expected_result = np.prod(self.input)
self.assertTrue(np.allclose(static_result[0], expected_result))
expected_result = np.prod(self.input, axis=1)
self.assertTrue(np.allclose(static_result[1], expected_result))
expected_result = np.prod(self.input, axis=(0, 1))
expected_result = np.prod(self.input, axis=-1)
self.assertTrue(np.allclose(static_result[2], expected_result))
expected_result = np.prod(self.input, axis=1, keepdims=True)
expected_result = np.prod(self.input, axis=(0, 1))
self.assertTrue(np.allclose(static_result[3], expected_result))
expected_result = np.prod(self.input, axis=1, dtype=np.int64)
expected_result = np.prod(self.input, axis=1, keepdims=True)
self.assertTrue(np.allclose(static_result[4], expected_result))
expected_result = np.prod(self.input, axis=1, dtype=np.int64)
self.assertTrue(np.allclose(static_result[5], expected_result))
expected_result = np.prod(
self.input, axis=1, keepdims=True, dtype=np.int64)
self.assertTrue(np.allclose(static_result[5], expected_result))
self.assertTrue(np.allclose(static_result[6], expected_result))

def test_cpu(self):
paddle.disable_static(place=paddle.fluid.CPUPlace())
paddle.disable_static(place=paddle.CPUPlace())
self.run_imperative()
paddle.enable_static()

with fluid.program_guard(fluid.Program()):
with paddle.static.program_guard(paddle.static.Program()):
self.run_static()

def test_gpu(self):
if not fluid.core.is_compiled_with_cuda():
if not paddle.fluid.core.is_compiled_with_cuda():
return

paddle.disable_static(place=paddle.fluid.CUDAPlace(4))
paddle.disable_static(place=paddle.CUDAPlace(0))
self.run_imperative()
paddle.enable_static()

with fluid.program_guard(fluid.Program()):
with paddle.static.program_guard(paddle.static.Program()):
self.run_static(use_gpu=True)


class TestProdOpError(unittest.TestCase):
def test_error(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()):
x = paddle.data(name='x', shape=[2, 2, 4], dtype='float32')
bool_x = paddle.data(name='bool_x', shape=[2, 2, 4], dtype='bool')
# The argument x should be a Tensor
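For context, a self-contained sketch of the static-graph pattern the rewritten test exercises, using only entry points that appear in this diff (paddle.data, paddle.static.Program, paddle.static.Executor, paddle.static.default_startup_program); shapes and values are illustrative, not taken from the test.

import numpy as np
import paddle

paddle.enable_static()

with paddle.static.program_guard(paddle.static.Program()):
    x = paddle.data(name='x', shape=[4, 5], dtype='float32')
    full = paddle.prod(x)                        # product over all elements
    last = paddle.prod(x, axis=-1)               # reduce the last axis
    kept = paddle.prod(x, axis=1, keepdim=True)  # keep the reduced axis as size 1

    exe = paddle.static.Executor(paddle.CPUPlace())
    exe.run(paddle.static.default_startup_program())
    data = np.random.uniform(0.5, 1.5, size=[4, 5]).astype('float32')
    out_full, out_last, out_kept = exe.run(feed={'x': data},
                                           fetch_list=[full, last, kept])

assert np.allclose(out_full, np.prod(data))
assert np.allclose(out_last, np.prod(data, axis=-1))
assert np.allclose(out_kept, np.prod(data, axis=1, keepdims=True))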
13 changes: 9 additions & 4 deletions python/paddle/tensor/math.py
@@ -1630,14 +1630,15 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None):
Compute the product of tensor elements over the given axis.
Args:
x(Tensor): Input of prod operator. The data type is float32, float64, int32, int64.
axis(list|int, optional): The axis along which the product is computed. If :attr:`None`,
x(Tensor): An N-D Tensor, the data type is float32, float64, int32 or int64.
axis(int|list|tuple, optional): The axis along which the product is computed. If :attr:`None`,
multiply all elements of `x` and return a Tensor with a single element,
otherwise must be in the range :math:`[-x.ndim, x.ndim)`. If :math:`axis[i]<0`,
the axis to reduce is :math:`x.ndim + axis[i]`. Default is None.
dtype(str, optional): The desired date type of returned tensor, can be float32, float64,
dtype(str|np.dtype, optional): The desired data type of the returned tensor, can be float32, float64,
int32, int64. If specified, the input tensor is cast to dtype before the operator is performed.
This is very useful for avoiding data type overflows. The default value is False.
This is very useful for avoiding data type overflows. The default value is None, in which case
the dtype of the output is the same as that of the input Tensor `x`.
keepdim(bool, optional): Whether to retain the reduced dimension in the output Tensor. The result
tensor will have one fewer dimension than the input unless keepdim is True. Default is False.
name(string, optional): The default value is None. Normally there is no need for the user to set this property.
@@ -1646,6 +1647,10 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None):
Returns:
Tensor, result of the product along the specified axis of the input tensor.
Raises:
ValueError: The :attr:`dtype` must be float32, float64, int32 or int64.
TypeError: The type of :attr:`axis` must be int, list or tuple.
Examples:
.. code-block:: python
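Since the docstring's Examples block is collapsed above, the snippet below is an illustrative sketch of the documented axis and dtype behaviour in imperative mode. paddle.to_tensor is assumed here and is not part of this diff (the commit's own tests may use a different dygraph conversion helper); the values are chosen only to show the overflow case the docstring mentions.

import numpy as np
import paddle

paddle.disable_static()
# paddle.to_tensor is assumed; it does not appear in this diff.
x = paddle.to_tensor(np.full([3, 4], 1000, dtype='int32'))

# axis accepts an int, a list, or a tuple; negative values count from the end.
per_row = paddle.prod(x, axis=-1)        # shape: [3]
collapsed = paddle.prod(x, axis=[0, 1])  # single element

# 1000**4 overflows int32, so cast to int64 first via dtype, which is what the
# docstring recommends for avoiding data type overflows.
safe = paddle.prod(x, axis=1, dtype='int64')  # each entry equals 10**12

print(per_row.numpy().shape, collapsed.numpy().shape, safe.numpy())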

1 comment on commit fceb02c

@paddle-bot-old (bot) commented on fceb02c on Aug 18, 2020

🕵️ CI failures summary

🔍 Commit ID: fceb02c contains failed CI.
