From 9dd94a513c0834c08535fff4138959ed777054c6 Mon Sep 17 00:00:00 2001
From: xiaoye <50870160+xiaoyewww@users.noreply.github.com>
Date: Fri, 3 Nov 2023 12:05:36 +0800
Subject: [PATCH] =?UTF-8?q?=E3=80=90complex=E3=80=91=20No.32=20support=20c?=
 =?UTF-8?q?omplex=20for=20softsign=20(#58545)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../phi/kernels/cpu/activation_grad_kernel.cc |  3 +-
 paddle/phi/kernels/cpu/activation_kernel.cc   |  2 +-
 paddle/phi/kernels/funcs/activation_functor.h | 54 +++++++++++++++++++
 .../phi/kernels/gpu/activation_grad_kernel.cu |  3 +-
 paddle/phi/kernels/gpu/activation_kernel.cu   |  2 +-
 python/paddle/nn/functional/activation.py    |  2 +-
 test/legacy_test/test_activation_op.py       | 15 ++++++
 7 files changed, 76 insertions(+), 5 deletions(-)

diff --git a/paddle/phi/kernels/cpu/activation_grad_kernel.cc b/paddle/phi/kernels/cpu/activation_grad_kernel.cc
index 65bde5601128f..84ec899d9d399 100644
--- a/paddle/phi/kernels/cpu/activation_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/activation_grad_kernel.cc
@@ -423,7 +423,8 @@ PD_REGISTER_KERNEL(cos_triple_grad,
                    phi::dtype::complex<float>,
                    phi::dtype::complex<double>) {}
 
-PD_REGISTER_ACTIVATION_GRAD_KERNEL(softsign_grad, SoftsignGradKernel)
+PD_REGISTER_ACTIVATION_GRAD_KERNEL_WITH_COMPLEX(softsign_grad,
+                                                SoftsignGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL_WITH_COMPLEX(sigmoid_grad, SigmoidGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL_WITH_COMPLEX(sigmoid_double_grad,
                                                 SigmoidDoubleGradKernel)
diff --git a/paddle/phi/kernels/cpu/activation_kernel.cc b/paddle/phi/kernels/cpu/activation_kernel.cc
index a8169df1021d2..e704eefc54ebb 100644
--- a/paddle/phi/kernels/cpu/activation_kernel.cc
+++ b/paddle/phi/kernels/cpu/activation_kernel.cc
@@ -230,7 +230,7 @@ PD_REGISTER_KERNEL(expm1,
 PD_REGISTER_KERNEL(logit, CPU, ALL_LAYOUT, phi::LogitKernel, float, double) {}
 PD_REGISTER_KERNEL(
     square, CPU, ALL_LAYOUT, phi::SquareKernel, float, double, int, int64_t) {}
-PD_REGISTER_ACTIVATION_KERNEL(softsign, SoftsignKernel)
+PD_REGISTER_ACTIVATION_KERNEL_WITH_COMPLEX(softsign, SoftsignKernel)
 PD_REGISTER_ACTIVATION_KERNEL_WITH_COMPLEX(sigmoid, SigmoidKernel)
 PD_REGISTER_ACTIVATION_KERNEL_WITH_COMPLEX(logsigmoid, LogSigmoidKernel)
 PD_REGISTER_ACTIVATION_KERNEL(hardsigmoid, HardSigmoidKernel)
diff --git a/paddle/phi/kernels/funcs/activation_functor.h b/paddle/phi/kernels/funcs/activation_functor.h
index b2c2d493c48ad..06b59644cf11d 100644
--- a/paddle/phi/kernels/funcs/activation_functor.h
+++ b/paddle/phi/kernels/funcs/activation_functor.h
@@ -107,6 +107,14 @@ struct Conj {
   }
 };
 
+// T is phi::dtype::complex<float> or phi::dtype::complex<double>
+template <typename T>
+struct Real {
+  HOSTDEVICE ComplexType<T> operator()(const ComplexType<T>& val) const {
+    return ComplexType<T>(val.real);
+  }
+};
+
 // sine'(x) = cos(x)
 template <typename T>
 struct SinGradFunctor : public BaseActivationFunctor<T> {
@@ -2129,6 +2137,24 @@ struct SoftsignGradFunctor : public BaseActivationFunctor<T> {
   static constexpr ActBwdOpFwdDeps FwdDeps() { return ActBwdOpFwdDeps::kDepX; }
 };
 
+template <typename T>
+struct SoftsignGradFunctor<ComplexType<T>>
+    : public BaseActivationFunctor<ComplexType<T>> {
+  template <typename Device,
+            typename X,
+            typename Out,
+            typename dOut,
+            typename dX>
+  void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
+    ComplexType<T> one = static_cast<ComplexType<T>>(1.0f);
+    auto temp = (-x / (one + x.abs()).square()).unaryExpr(Real<T>());
+
+    dx.device(d) = dout * (one / (one + x.abs()) + temp * x / x.abs());
+  }
+
+  static constexpr ActBwdOpFwdDeps FwdDeps() { return ActBwdOpFwdDeps::kDepX; }
+};
+
 // sigmoid(x) = 1 / (1 + exp(-x))
 template <typename T>
 struct SigmoidFunctor : public BaseActivationFunctor<T> {
@@ -4339,6 +4365,17 @@ struct CudaSoftsignFunctor : public BaseActivationFunctor<T> {
   }
 };
 
+template <typename T>
+struct CudaSoftsignFunctor<ComplexType<T>>
+    : public BaseActivationFunctor<ComplexType<T>> {
+  using Complex = ComplexType<T>;
+  Complex one = static_cast<Complex>(1.0f);
+
+  __device__ __forceinline__ Complex operator()(const Complex x) const {
+    return x / (one + static_cast<Complex>(abs(x)));
+  }
+};
+
 template <typename T>
 struct CudaSoftsignGradFunctor : public BaseActivationFunctor<T> {
   T one = static_cast<T>(1.0f);
@@ -4353,6 +4390,23 @@ struct CudaSoftsignGradFunctor : public BaseActivationFunctor<T> {
   static constexpr ActBwdOpFwdDeps FwdDeps() { return ActBwdOpFwdDeps::kDepX; }
 };
 
+template <typename T>
+struct CudaSoftsignGradFunctor<ComplexType<T>>
+    : public BaseActivationFunctor<ComplexType<T>> {
+  using Complex = ComplexType<T>;
+  Complex one = static_cast<Complex>(1.0f);
+
+  __device__ __forceinline__ Complex operator()(const Complex dout,
+                                                const Complex x) const {
+    Complex abs_x = static_cast<Complex>(abs(x));
+    Complex abs_x_plus = one + abs_x;
+    Complex temp = static_cast<Complex>((-x / (abs_x_plus * abs_x_plus)).real);
+    return dout * (one / abs_x_plus + temp * x / abs_x);
+  }
+
+  static constexpr ActBwdOpFwdDeps FwdDeps() { return ActBwdOpFwdDeps::kDepX; }
+};
+
 template <typename T>
 struct CudaSigmoidFunctor : public BaseActivationFunctor<T> {
   using MPType = typename phi::dtype::MPTypeTrait<T>::Type;
diff --git a/paddle/phi/kernels/gpu/activation_grad_kernel.cu b/paddle/phi/kernels/gpu/activation_grad_kernel.cu
index c67864bc13f57..2a1c6759bbc8b 100644
--- a/paddle/phi/kernels/gpu/activation_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/activation_grad_kernel.cu
@@ -495,7 +495,8 @@ PD_REGISTER_KERNEL(cos_triple_grad,
                    phi::dtype::complex<float>,
                    phi::dtype::complex<double>) {}
 
-PD_REGISTER_ACTIVATION_GRAD_KERNEL(softsign_grad, SoftsignGradKernel)
+PD_REGISTER_ACTIVATION_GRAD_KERNEL_WITH_COMPLEX(softsign_grad,
+                                                SoftsignGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL_WITH_COMPLEX(sigmoid_grad, SigmoidGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL_WITH_COMPLEX(sigmoid_double_grad,
                                                 SigmoidDoubleGradKernel)
diff --git a/paddle/phi/kernels/gpu/activation_kernel.cu b/paddle/phi/kernels/gpu/activation_kernel.cu
index 6eeba717ece0d..34bbbfbd11859 100644
--- a/paddle/phi/kernels/gpu/activation_kernel.cu
+++ b/paddle/phi/kernels/gpu/activation_kernel.cu
@@ -292,7 +292,7 @@ PD_REGISTER_ACTIVATION_KERNEL(softshrink, SoftShrinkKernel)
 PD_REGISTER_ACTIVATION_KERNEL(tanh_shrink, TanhShrinkKernel)
 PD_REGISTER_ACTIVATION_KERNEL(elu, EluKernel)
 PD_REGISTER_ACTIVATION_KERNEL_WITH_COMPLEX(silu, SiluKernel)
-PD_REGISTER_ACTIVATION_KERNEL(softsign, SoftsignKernel)
+PD_REGISTER_ACTIVATION_KERNEL_WITH_COMPLEX(softsign, SoftsignKernel)
 PD_REGISTER_ACTIVATION_KERNEL_WITH_COMPLEX(sigmoid, SigmoidKernel)
 PD_REGISTER_ACTIVATION_KERNEL_WITH_COMPLEX(logsigmoid, LogSigmoidKernel)
 PD_REGISTER_ACTIVATION_KERNEL(hardsigmoid, HardSigmoidKernel)
diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py
index fb271c5d4c394..43c17d740cd7f 100644
--- a/python/paddle/nn/functional/activation.py
+++ b/python/paddle/nn/functional/activation.py
@@ -1389,7 +1389,7 @@ def softsign(x, name=None):
         softsign(x) = \frac{x}{1 + |x|}
 
     Parameters:
-        x (Tensor): The input Tensor with data type float32, float64.
+        x (Tensor): The input Tensor with data type float32, float64, complex64 or complex128.
         name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
 
     Returns:
diff --git a/test/legacy_test/test_activation_op.py b/test/legacy_test/test_activation_op.py
index 657bc609ca0a5..24307d16e50f1 100644
--- a/test/legacy_test/test_activation_op.py
+++ b/test/legacy_test/test_activation_op.py
@@ -4147,6 +4147,11 @@ def setUp(self):
 
         np.random.seed(1024)
         x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
+        if self.dtype == np.complex64 or self.dtype == np.complex128:
+            x = (
+                np.random.uniform(-1, 1, self.shape)
+                + 1j * np.random.uniform(-1, 1, self.shape)
+            ).astype(self.dtype)
         out = ref_softsign(x)
 
         self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)}
@@ -4162,6 +4167,16 @@ def test_check_grad(self):
         self.check_grad(['X'], 'Out')
 
 
+class TestSoftsign_Complex64(TestSoftsign):
+    def init_dtype(self):
+        self.dtype = np.complex64
+
+
+class TestSoftsign_Complex128(TestSoftsign):
+    def init_dtype(self):
+        self.dtype = np.complex128
+
+
 class TestSoftsign_ZeroDim(TestSoftsign):
     def init_shape(self):
         self.shape = []
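
Reviewer illustration (not part of the patch): the NumPy sketch below mirrors the forward and backward formulas the new kernels compute for complex inputs, i.e. out = x / (1 + |x|) and dx = dout * (1 / (1 + |x|) + Re(-x / (1 + |x|)^2) * x / |x|). The function names, shapes, and the nonzero-input assumption are illustrative only; this is not code from the Paddle repository.

# Hypothetical NumPy mirror of the complex softsign kernels in this patch.
import numpy as np


def softsign_complex(x):
    # Forward, mirroring CudaSoftsignFunctor<ComplexType<T>>: x / (1 + |x|).
    return x / (1.0 + np.abs(x))


def softsign_grad_complex(dout, x):
    # Backward, mirroring SoftsignGradFunctor<ComplexType<T>>:
    # dx = dout * (1 / (1 + |x|) + Re(-x / (1 + |x|)^2) * x / |x|).
    abs_x = np.abs(x)  # real-valued magnitude; assumed nonzero here
    real_part = np.real(-x / (1.0 + abs_x) ** 2)
    return dout * (1.0 / (1.0 + abs_x) + real_part * x / abs_x)


if __name__ == "__main__":
    # Random complex input, analogous to what TestSoftsign_Complex64 feeds the op.
    rng = np.random.default_rng(1024)
    x = (
        rng.uniform(-1, 1, (4, 3)) + 1j * rng.uniform(-1, 1, (4, 3))
    ).astype(np.complex64)
    dout = np.ones_like(x)
    print(softsign_complex(x))
    print(softsign_grad_complex(dout, x))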