Commit 5936fa6

Add yaml for reduce_sum OP (PaddlePaddle#41295)
* Add yaml for reduce_sum OP

* Fix CI errors

* Fix CI errors

* Fix CI errors

* Fix CI errors
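
For context: with this change, calling paddle.sum in eager (dygraph) mode dispatches to the new final-state kernel generated from the yaml entries below, instead of the legacy reduce_sum op. A minimal sketch of the public behavior being exercised, assuming a Paddle build that includes this commit (tensor values are illustrative only):

    import paddle

    # In eager mode, paddle.sum now routes to _C_ops.final_state_sum.
    x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
    print(paddle.sum(x, axis=0))                # [4., 6.]
    print(paddle.sum(x, axis=0, keepdim=True))  # shape [1, 2]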
From00 committed Apr 4, 2022
1 parent 50f8e97 commit 5936fa6
Showing 5 changed files with 55 additions and 18 deletions.
python/paddle/fluid/tests/unittests/CMakeLists.txt (1 addition, 1 deletion)

@@ -1077,7 +1077,7 @@ set_tests_properties(test_generator_dataloader PROPERTIES TIMEOUT 120)
 set_tests_properties(test_partial_concat_op PROPERTIES TIMEOUT 120)
 set_tests_properties(test_fuse_optimizer_pass PROPERTIES TIMEOUT 120)
 set_tests_properties(test_softmax_with_cross_entropy_op PROPERTIES TIMEOUT 120)
-set_tests_properties(test_reduce_op PROPERTIES TIMEOUT 120)
+set_tests_properties(test_reduce_op PROPERTIES TIMEOUT 500)
 set_tests_properties(test_adam_optimizer_fp32_fp64 PROPERTIES TIMEOUT 120)
 set_tests_properties(test_elementwise_nn_grad PROPERTIES TIMEOUT 120)
 set_tests_properties(test_buffer_shared_memory_reuse_pass_and_fuse_optimization_op_pass PROPERTIES TIMEOUT 120)
python/paddle/fluid/tests/unittests/test_reduce_op.py (29 additions, 14 deletions)

@@ -26,19 +26,22 @@

 class TestSumOp(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
         self.attrs = {'dim': [0]}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestSumOp_fp16(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.uniform(0, 0.1, (5, 6, 10)).astype("float16")
@@ -50,22 +53,24 @@ def setUp(self):
         self.gradient = self.calc_gradient()

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def calc_gradient(self):
         x = self.inputs["X"]
         grad = np.ones(x.shape, dtype=x.dtype)
         return grad,

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
+        self.check_grad(
+            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True)


 @unittest.skipIf(not core.is_compiled_with_cuda(),
                  "core is not compiled with CUDA")
 class TestSumOp_bf16(OpTest):
     def setUp(self):
         np.random.seed(100)
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.dtype = np.uint16
         self.x = np.random.uniform(0, 0.1, (2, 5, 10)).astype(np.float32)
@@ -79,12 +84,15 @@ def setUp(self):

     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place)
+        self.check_output_with_place(place, check_eager=True)

     def test_check_grad(self):
         place = core.CUDAPlace(0)
         self.check_grad_with_place(
-            place, ['X'], 'Out', user_defined_grads=self.gradient)
+            place, ['X'],
+            'Out',
+            user_defined_grads=self.gradient,
+            check_eager=True)

     def calc_gradient(self):
         x = self.x
@@ -94,6 +102,7 @@ def calc_gradient(self):

 class TestSumOp_fp16_withInt(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             # ref to https://en.wikipedia.org/wiki/Half-precision_floating-point_format
@@ -107,49 +116,55 @@ def setUp(self):
         self.gradient = self.calc_gradient()

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def calc_gradient(self):
         x = self.inputs["X"]
         grad = np.ones(x.shape, dtype=x.dtype)
         return grad,

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
+        self.check_grad(
+            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True)


 class TestSumOp5D(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
         }
         self.attrs = {'dim': [0]}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestSumOp6D(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
         }
         self.attrs = {'dim': [0]}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestSumOp8D(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
@@ -158,10 +173,10 @@ def setUp(self):
         self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 @skip_check_grad_ci(
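
Every test class above gets the same two-part treatment: setUp declares self.python_api = paddle.sum so the harness can match the reduce_sum op against its public Python API, and each check passes check_eager=True so outputs and gradients are also verified under the new eager final-state kernel. Condensed into one minimal class (a sketch; assumes the op_test harness from this test directory is importable):

    import numpy as np
    import paddle
    from op_test import OpTest  # harness used by the tests in this directory

    class TestSumOpEager(OpTest):
        def setUp(self):
            self.python_api = paddle.sum  # maps reduce_sum to the public API
            self.op_type = "reduce_sum"
            self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
            self.attrs = {'dim': [0]}
            self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

        def test_check_output(self):
            self.check_output(check_eager=True)  # also runs the eager kernel

        def test_check_grad(self):
            self.check_grad(['X'], 'Out', check_eager=True)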
python/paddle/tensor/math.py (12 additions, 1 deletion)

@@ -904,7 +904,18 @@ def get_dtype(x, dtype):
         return (False, src_type)

     dtype_flag, dtype = get_dtype(x, dtype)
-    if paddle.in_dynamic_mode():
+
+    if in_dygraph_mode():
+        if reduce_all_flag:
+            axis = range(len(x.shape))
+        else:
+            axis = axis if axis != None and axis != [] else [0]
+
+        out_dtype = convert_np_dtype_to_dtype_(dtype)
+        out = _C_ops.final_state_sum(x, axis, out_dtype, keepdim)
+        return out
+
+    if _in_legacy_dygraph():
         axis = axis if axis != None and axis != [] else [0]
         if dtype_flag:
             return _C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
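
Note the shape of the new branch: final_state_sum has no reduce_all attribute, so a full reduction is expressed by passing every axis explicitly (range(len(x.shape))), while the legacy branch keeps its reduce_all flag. A quick sanity check of that equivalence through the public API (a sketch; assumes an eager-mode build with this commit):

    import paddle

    x = paddle.rand([2, 3, 4])

    # axis=None means "reduce over everything"; the eager branch rewrites
    # it to range(len(x.shape)) before calling _C_ops.final_state_sum.
    full = paddle.sum(x)
    explicit = paddle.sum(x, axis=[0, 1, 2])
    print(bool(paddle.allclose(full, explicit)))  # True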
python/paddle/utils/code_gen/api.yaml (3 additions, 2 deletions)

@@ -1596,13 +1596,14 @@
 # no_need_buffer : x, y

 - api : sum
-  args : (Tensor x, int64_t[] axis={}, DataType dtype=DataType::UNDEFINED, bool keep_dim=false)
-  output : Tensor
+  args : (Tensor x, int64_t[] dims={}, DataType out_dtype=paddle::experimental::DataType::UNDEFINED, bool keep_dim=false)
+  output : Tensor(out)
   infer_meta :
     func : SumInferMeta
   kernel :
     func : sum
     data_type : x
+  backward : sum_grad

 # take_along_axis
 - api : take_along_axis
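
The renamed yaml arguments line up one-to-one with the keyword arguments of the public paddle.sum API: dims with axis, out_dtype with dtype, keep_dim with keepdim. For illustration (the dtype promotion shown is standard paddle.sum behavior, not something introduced by this commit):

    import paddle

    x = paddle.to_tensor([[1, 2], [3, 4]], dtype='int32')

    # Maps onto the yaml signature sum(x, dims, out_dtype, keep_dim):
    out = paddle.sum(x, axis=[1], dtype='int64', keepdim=True)
    print(out.dtype)  # paddle.int64
    print(out.shape)  # [2, 1]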
python/paddle/utils/code_gen/backward.yaml (10 additions)

@@ -1152,6 +1152,16 @@
   kernel :
     func : subtract_grad

+- backward_api : sum_grad
+  forward : sum (Tensor x, int64_t[] dims={}, DataType out_dtype=paddle::experimental::DataType::UNDEFINED, bool keep_dim=false) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, int64_t[] dims, bool keep_dim, bool reduce_all=false)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : sum_grad
+
 - backward_api : take_along_axis_grad
   forward : take_along_axis (Tensor x, Tensor index, int axis) -> Tensor(out)
   args : (Tensor x, Tensor index, Tensor out_grad, int axis)
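
With sum_grad registered, gradients also flow through the eager path. The gradient of a plain sum is just ones broadcast back to the input shape, which is exactly what the calc_gradient() helpers in the tests encode. A quick check (a sketch; assumes an eager-mode build with this commit):

    import paddle

    x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]], stop_gradient=False)
    y = paddle.sum(x)  # forward: the sum kernel
    y.backward()       # backward: the sum_grad kernel registered above
    print(x.grad)      # all ones, matching calc_gradient() in the tests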
