diff --git a/paddle/fluid/operators/cum_op.cc b/paddle/fluid/operators/cum_op.cc
index 4c23020413ee5..3bd2c4979cb0b 100644
--- a/paddle/fluid/operators/cum_op.cc
+++ b/paddle/fluid/operators/cum_op.cc
@@ -15,6 +15,9 @@ limitations under the License. */
 #include "paddle/fluid/framework/infershape_utils.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/op_version_registry.h"
+#include "paddle/fluid/prim/api/composite_backward/composite_backward_api.h"
+#include "paddle/fluid/prim/utils/static/composite_grad_desc_maker.h"
+#include "paddle/fluid/prim/utils/static/desc_tensor.h"
 #include "paddle/phi/core/infermeta_utils.h"
 #include "paddle/phi/infermeta/unary.h"
 
@@ -100,6 +103,27 @@ class CumsumGradMaker : public framework::SingleGradOpMaker<T> {
   }
 };
 
+class CumsumCompositeGradOpMaker : public prim::CompositeGradOpMakerBase {
+  using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
+
+ public:
+  void Apply() override {
+    paddle::experimental::Tensor x = this->GetSingleForwardInput("X");
+    paddle::experimental::Tensor out_grad = this->GetSingleOutputGrad("Out");
+    paddle::experimental::Tensor dx = this->GetSingleInputGrad("X");
+    auto* dx_ptr = this->GetOutputPtr(&dx);
+    std::string dx_name = this->GetOutputName(dx);
+    int axis = static_cast<int>(this->Attr<int>("axis"));
+    bool flatten = static_cast<bool>(this->Attr<bool>("flatten"));
+    bool exclusive = static_cast<bool>(this->Attr<bool>("exclusive"));
+    bool reverse = static_cast<bool>(this->Attr<bool>("reverse"));
+    VLOG(6) << "Running cumsum_grad composite func";
+    prim::cumsum_grad<prim::DescTensor>(
+        x, out_grad, axis, flatten, exclusive, reverse, dx_ptr);
+    this->RecoverOutputName(dx, dx_name);
+  }
+};
+
 class LogcumsumexpOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
@@ -182,6 +206,7 @@ DECLARE_INFER_SHAPE_FUNCTOR(logcumsumexp,
 REGISTER_OPERATOR(cumsum,
                   ops::CumOp,
                   ops::CumsumOpMaker,
+                  ops::CumsumCompositeGradOpMaker,
                   ops::CumsumGradMaker<paddle::framework::OpDesc>,
                   ops::CumsumGradMaker<paddle::imperative::OpBase>,
                   CumsumInferShapeFunctor);
diff --git a/paddle/fluid/prim/api/api.yaml b/paddle/fluid/prim/api/api.yaml
index 9f9c2763a4cc5..e47c7a45713dc 100644
--- a/paddle/fluid/prim/api/api.yaml
+++ b/paddle/fluid/prim/api/api.yaml
@@ -25,3 +25,4 @@
 - tile
 - transpose
 - pad
+- cumsum
diff --git a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h
index 28682cc24e22c..fd3b4fe2f9a8e 100644
--- a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h
+++ b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h
@@ -414,5 +414,20 @@ void slice_grad(const Tensor& input,
   }
 }
 
+template <typename T>
+void cumsum_grad(const Tensor& x,
+                 const Tensor& out_grad,
+                 const Scalar& axis,
+                 bool flatten,
+                 bool exclusive,
+                 bool reverse,
+                 Tensor* x_grad) {
+  if (x_grad) {
+    auto grad = cumsum<T>(out_grad, axis, flatten, exclusive, !reverse);
+    grad = reshape<T>(grad, x.shape());
+    set_output<T>(grad, x_grad);
+  }
+}
+
 }  // namespace prim
 }  // namespace paddle
diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index 5ab4c714092dc..23ff93adfc06e 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -313,6 +313,7 @@
   kernel :
     func : cumsum_grad
     data_type: x
+  composite: cumsum_grad(x, out_grad, axis, flatten, exclusive, reverse, x_grad)
 
 - backward_op : deformable_conv_grad
   forward : deformable_conv(Tensor x, Tensor offset, Tensor filter, Tensor mask, int[] strides, int[] paddings, int[] dilations, int deformable_groups, int groups, int im2col_step) -> Tensor(out)
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index 1eb10cf637c9c..4984a23670b66 100644
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -365,6 +365,17 @@
   outputs :
     out : Out
 
+- op : cumsum
+  backward: cumsum_grad
+  inputs :
+    x : X
+  outputs :
+    out : Out
+  scalar:
+    axis:
+      data_type : int
+      tensor_name: AxisTensor
+
 - op : data_norm
   backward : data_norm_grad
   extra :
diff --git a/python/paddle/fluid/tests/unittests/test_cumsum_op.py b/python/paddle/fluid/tests/unittests/test_cumsum_op.py
index 3a51d9c6d92e4..1194bd91033fc 100644
--- a/python/paddle/fluid/tests/unittests/test_cumsum_op.py
+++ b/python/paddle/fluid/tests/unittests/test_cumsum_op.py
@@ -115,6 +115,9 @@ def test_name(self):
 class TestSumOp1(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
+        self.prim_op_type = "prim"
+        self.python_api = paddle.cumsum
+        self.enable_cinn = False
         self.attrs = {'axis': 2}
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].cumsum(axis=2)}
@@ -123,12 +126,15 @@ def test_check_output(self):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)
 
 
 class TestSumOp2(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
+        self.prim_op_type = "prim"
+        self.python_api = paddle.cumsum
+        self.enable_cinn = False
         self.attrs = {'axis': -1, 'reverse': True}
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
         self.outputs = {
@@ -141,12 +147,15 @@ def test_check_output(self):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)
 
 
 class TestSumOp3(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
+        self.prim_op_type = "prim"
+        self.python_api = paddle.cumsum
+        self.enable_cinn = False
         self.attrs = {'axis': 1}
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].cumsum(axis=1)}
@@ -155,12 +164,15 @@ def test_check_output(self):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)
 
 
 class TestSumOp4(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
+        self.prim_op_type = "prim"
+        self.python_api = paddle.cumsum
+        self.enable_cinn = False
         self.attrs = {'axis': 0}
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].cumsum(axis=0)}
@@ -175,6 +187,9 @@ def test_check_grad(self):
 class TestSumOp5(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
+        self.prim_op_type = "prim"
+        self.python_api = paddle.cumsum
+        self.enable_cinn = False
         self.inputs = {'X': np.random.random((5, 20)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].cumsum(axis=1)}
 
@@ -188,6 +203,9 @@ def test_check_grad(self):
 class TestSumOp7(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
+        self.prim_op_type = "prim"
+        self.python_api = paddle.cumsum
+        self.enable_cinn = False
         self.inputs = {'X': np.random.random((100)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].cumsum(axis=0)}
 
@@ -226,6 +244,9 @@ def test_main(self):
 class TestSumOpExclusive1(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
+        self.prim_op_type = "prim"
+        self.python_api = paddle.cumsum
+        self.enable_cinn = False
         self.attrs = {'axis': 2, "exclusive": True}
         a = np.random.random((4, 5, 20)).astype("float64")
         self.inputs = {'X': a}
@@ -243,12 +264,15 @@ def test_check_output(self):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)
 
 
 class TestSumOpExclusive2(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
+        self.prim_op_type = "prim"
+        self.python_api = paddle.cumsum
+        self.enable_cinn = False
         self.attrs = {'axis': 2, "exclusive": True}
         a = np.random.random((1, 1, 100)).astype("float64")
         self.inputs = {'X': a}
@@ -266,12 +290,15 @@ def test_check_output(self):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)
 
 
 class TestSumOpExclusive3(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
+        self.prim_op_type = "prim"
+        self.python_api = paddle.cumsum
+        self.enable_cinn = False
         self.attrs = {'axis': 2, "exclusive": True}
         a = np.random.random((4, 5, 20)).astype("float64")
         self.inputs = {'X': a}
@@ -289,12 +316,15 @@ def test_check_output(self):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)
 
 
 class TestSumOpExclusive4(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
+        self.prim_op_type = "prim"
+        self.python_api = paddle.cumsum
+        self.enable_cinn = False
         self.attrs = {'axis': 2, "exclusive": True}
         a = np.random.random((1, 1, 100)).astype("float64")
         self.inputs = {'X': a}
@@ -312,12 +342,15 @@ def test_check_output(self):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)
 
 
 class TestSumOpExclusive5(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
+        self.prim_op_type = "prim"
+        self.python_api = paddle.cumsum
+        self.enable_cinn = False
         self.attrs = {'axis': 2, "exclusive": True}
         a = np.random.random((4, 5, 40)).astype("float64")
         self.inputs = {'X': a}
@@ -335,12 +368,15 @@ def test_check_output(self):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)
 
 
 class TestSumOpExclusiveFP16(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
+        self.prim_op_type = "prim"
+        self.python_api = paddle.cumsum
+        self.enable_cinn = False
         self.attrs = {'axis': 2, "exclusive": True, "dtype": "float16"}
         a = np.random.random((4, 5, 20)).astype("float64")
         self.inputs = {'X': a}
@@ -358,12 +394,15 @@ def test_check_output(self):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)
 
 
 class TestSumOpReverseExclusive(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
+        self.prim_op_type = "prim"
+        self.python_api = paddle.cumsum
+        self.enable_cinn = False
         self.attrs = {'axis': 2, 'reverse': True, "exclusive": True}
         a = np.random.random((4, 5, 6)).astype("float64")
         self.inputs = {'X': a}
@@ -382,7 +421,7 @@ def test_check_output(self):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)
 
 
 class BadInputTest(unittest.TestCase):
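
Note on the composite rule: cumsum_grad decomposes the backward of a (possibly exclusive, possibly reversed) cumulative sum into another cumsum over the incoming gradient with the reverse flag flipped, since dL/dx_j = sum over i >= j of dL/dout_i, i.e. the suffix sum of out_grad. Below is a minimal NumPy sketch that checks this identity for every exclusive/reverse combination; cumsum_ref and composite_cumsum_grad are hypothetical helpers written for this note (the flatten path is omitted), and the flag semantics are assumed to match paddle.cumsum.

import numpy as np

def cumsum_ref(x, axis, exclusive=False, reverse=False):
    # Reference cumsum with Paddle-style flags (assumed semantics):
    # reverse accumulates from the end of the axis; exclusive drops each
    # element's own contribution (out_i = sum of strictly-preceding elements).
    if reverse:
        x = np.flip(x, axis)
    out = np.cumsum(x, axis=axis)
    if exclusive:
        out = out - x
    if reverse:
        out = np.flip(out, axis)
    return out

def composite_cumsum_grad(out_grad, axis, exclusive, reverse):
    # Mirrors prim::cumsum_grad above:
    # cumsum(out_grad, axis, flatten, exclusive, !reverse), flatten omitted.
    return cumsum_ref(out_grad, axis, exclusive=exclusive, reverse=not reverse)

rng = np.random.default_rng(0)
n = 7
g = rng.standard_normal(n)  # upstream gradient dL/d(out)
for exclusive in (False, True):
    for reverse in (False, True):
        # cumsum is linear, so its Jacobian J has columns cumsum(e_i);
        # the exact input gradient is therefore J^T @ g.
        J = np.stack(
            [cumsum_ref(np.eye(n)[i], 0, exclusive, reverse) for i in range(n)],
            axis=1,
        )
        expected = J.T @ g
        got = composite_cumsum_grad(g, 0, exclusive, reverse)
        assert np.allclose(expected, got), (exclusive, reverse)
print("composite cumsum_grad matches J^T g for all flag combinations")

Because the backward rule is itself a single cumsum, the gradient stays within the prim op set, which is what the tests above exercise via check_prim=True.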