【Hackathon 5th No.11】add gammaincc and gammainc API #59357

Merged · 29 commits · Jan 24, 2024
Changes shown from 27 of the 29 commits.

Commits
9bc8ff5
【Hackathon 5th No.11】add igamma and igammac API
GreatV Nov 24, 2023
e2975de
fix bug
GreatV Nov 24, 2023
77fa04c
Merge branch 'develop' into add_igamma_igammac
GreatV Nov 28, 2023
93b329c
Merge branch 'develop' into add_igamma_igammac
GreatV Nov 28, 2023
26b8fea
Merge branch 'develop' into add_igamma_igammac
GreatV Nov 28, 2023
c753bbc
fix codestyle
GreatV Nov 28, 2023
95ab00e
Merge branch 'develop' into add_igamma_igammac
GreatV Dec 18, 2023
72d9733
fix bug
GreatV Dec 18, 2023
7bb8331
update ut
GreatV Dec 18, 2023
698b168
Merge branch 'develop' into add_igamma_igammac
GreatV Dec 18, 2023
be0902b
fix bug
GreatV Dec 18, 2023
a305761
fix bug
GreatV Dec 18, 2023
e35b378
add test inplace
GreatV Dec 19, 2023
00b9c41
fix bug
GreatV Dec 19, 2023
64c8fdb
fix bug
GreatV Dec 19, 2023
df1cd20
remove unused comment
GreatV Dec 19, 2023
42cc077
remove some c++ impl
GreatV Dec 20, 2023
3d2a1d1
update code
GreatV Dec 24, 2023
47fcd00
Merge branch 'develop' into add_igamma_igammac
GreatV Dec 24, 2023
623f01f
fix bug
GreatV Dec 24, 2023
56e5ce8
fix bug
GreatV Dec 25, 2023
faf3757
update
GreatV Dec 27, 2023
ad0e1cc
remove some paddle.enable_static()
GreatV Jan 4, 2024
ac25528
remove eigen impl
GreatV Jan 8, 2024
8f96075
Merge branch 'develop' into add_igamma_igammac
GreatV Jan 8, 2024
d7c1b59
fix test_inplace
GreatV Jan 9, 2024
cb34836
Merge branch 'develop' into add_igamma_igammac
GreatV Jan 22, 2024
af72a5c
rename op
GreatV Jan 23, 2024
ec4b0de
igamma(a, x) -> gammaincc(x, y)
GreatV Jan 23, 2024
10 changes: 10 additions & 0 deletions paddle/phi/api/yaml/backward.yaml
@@ -1148,6 +1148,16 @@
data_type : out_grad
inplace : (out_grad -> x_grad)

- backward_op : igamma_grad
forward : igamma(Tensor x, Tensor a) -> Tensor(out)
args : (Tensor x, Tensor a, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : igamma_grad

- backward_op : imag_grad
forward : imag (Tensor x) -> Tensor(out)
args : (Tensor out_grad)
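
Note that igamma_grad declares only Tensor(x_grad): the op is differentiated with respect to x alone, which matches the closed-form derivative implemented by IgammaGradFunctor further down (see the derivation after igamma_grad_kernel_impl.h).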
11 changes: 11 additions & 0 deletions paddle/phi/api/yaml/ops.yaml
@@ -1311,6 +1311,17 @@
inplace: (x -> out)
backward : identity_loss_grad

- op : igamma
args : (Tensor x, Tensor a)
output : Tensor(out)
infer_meta :
func : ElementwiseInferMeta
param : [x, a]
kernel :
func : igamma
inplace: (x -> out)
backward : igamma_grad

- op : imag
args : (Tensor x)
output : Tensor (out)
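
For reference: IgammaFunctor in igamma_kernel_impl.h below evaluates igamc, so the igamma op as defined here computes the regularized upper incomplete gamma function

Q(a, x) = Γ(a, x) / Γ(a) = (1 / Γ(a)) ∫_x^∞ t^(a−1) e^(−t) dt,

with the lower counterpart P(a, x) = 1 − Q(a, x). This is also why the final commit renames the public API to gammaincc, the conventional name for the complemented (upper) function.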
22 changes: 22 additions & 0 deletions paddle/phi/kernels/cpu/igamma_grad_kernel.cc
@@ -0,0 +1,22 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/igamma_grad_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/igamma_grad_kernel_impl.h"

PD_REGISTER_KERNEL(
igamma_grad, CPU, ALL_LAYOUT, phi::IgammaGradKernel, float, double) {}
22 changes: 22 additions & 0 deletions paddle/phi/kernels/cpu/igamma_kernel.cc
@@ -0,0 +1,22 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/igamma_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"

#include "paddle/phi/kernels/impl/igamma_kernel_impl.h"

PD_REGISTER_KERNEL(igamma, CPU, ALL_LAYOUT, phi::IgammaKernel, float, double) {}
22 changes: 22 additions & 0 deletions paddle/phi/kernels/gpu/igamma_grad_kernel.cu
@@ -0,0 +1,22 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/igamma_grad_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/igamma_grad_kernel_impl.h"

PD_REGISTER_KERNEL(
igamma_grad, GPU, ALL_LAYOUT, phi::IgammaGradKernel, float, double) {}
22 changes: 22 additions & 0 deletions paddle/phi/kernels/gpu/igamma_kernel.cu
@@ -0,0 +1,22 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/igamma_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"

#include "paddle/phi/kernels/impl/igamma_kernel_impl.h"

PD_REGISTER_KERNEL(igamma, GPU, ALL_LAYOUT, phi::IgammaKernel, float, double) {}
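
Note that the CPU and GPU registration files are nearly identical by design: the actual element-wise logic lives in the shared impl/ headers, whose functors are marked HOSTDEVICE, so a single templated implementation compiles for both backends and only the registration translation units differ.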
28 changes: 28 additions & 0 deletions paddle/phi/kernels/igamma_grad_kernel.h
@@ -0,0 +1,28 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void IgammaGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& a,
const DenseTensor& d_out,
DenseTensor* d_x);
} // namespace phi
27 changes: 27 additions & 0 deletions paddle/phi/kernels/igamma_kernel.h
@@ -0,0 +1,27 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void IgammaKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& a,
DenseTensor* out);
} // namespace phi
62 changes: 62 additions & 0 deletions paddle/phi/kernels/impl/igamma_grad_kernel_impl.h
@@ -0,0 +1,62 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/for_range.h"

namespace phi {
template <typename T>
struct IgammaGradFunctor {
IgammaGradFunctor(
const T* dout, const T* x, const T* a, T* output, int64_t numel)
: dout_(dout), x_(x), a_(a), output_(output), numel_(numel) {}

// x_grad = dout * dQ(a, x)/dx = dout * (-exp(-x) * x^(a-1) / Gamma(a)).
HOSTDEVICE void operator()(int64_t idx) const {
using MT = typename phi::dtype::MPTypeTrait<T>::Type;
const MT mp_dout = static_cast<MT>(dout_[idx]);
const MT mp_x = static_cast<MT>(x_[idx]);
const MT mp_a = static_cast<MT>(a_[idx]);
const MT mp_a_1 = static_cast<MT>(a_[idx] - 1);
output_[idx] = static_cast<T>(mp_dout * -std::exp(-mp_x) *
std::pow(mp_x, mp_a_1) / std::tgamma(mp_a));
}

private:
const T* dout_;
const T* x_;
const T* a_;
T* output_;
int64_t numel_;
};

template <typename T, typename Context>
void IgammaGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& a,
const DenseTensor& d_out,
DenseTensor* d_x) {
auto numel = d_out.numel();
auto* dout_data = d_out.data<T>();
auto* x_data = x.data<T>();
auto* a_data = a.data<T>();
auto* dx_data =
dev_ctx.template Alloc<T>(d_x, static_cast<size_t>(numel * sizeof(T)));
phi::funcs::ForRange<Context> for_range(dev_ctx, numel);
IgammaGradFunctor<T> functor(dout_data, x_data, a_data, dx_data, numel);
for_range(functor);
}
} // namespace phi
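
The functor above hard-codes the analytic derivative of the forward output. Assuming out = Q(a, x) (consistent with the forward functor in igamma_kernel_impl.h, next file), differentiating under the integral sign gives

dQ(a, x)/dx = d/dx [Γ(a, x) / Γ(a)] = −x^(a−1) e^(−x) / Γ(a),

since dΓ(a, x)/dx = −x^(a−1) e^(−x) by the fundamental theorem of calculus applied to ∫_x^∞ t^(a−1) e^(−t) dt. The chain rule then yields x_grad = out_grad · (−e^(−x) · x^(a−1) / Γ(a)), which is exactly the expression computed in operator() via std::exp, std::pow, and std::tgamma.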
143 changes: 143 additions & 0 deletions paddle/phi/kernels/impl/igamma_kernel_impl.h
@@ -0,0 +1,143 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/for_range.h"

#define MAXLOG 7.09782712893383996732E2
#define MACHEP 1.11022302462515654042E-16

namespace phi {
template <typename T>
HOSTDEVICE T igam(const T a, const T x);
template <typename T>
HOSTDEVICE T igamc(const T a, const T x);

template <typename T>
HOSTDEVICE T igam(const T a, const T x) {
if ((x <= T{0}) || (a <= T{0})) return (T{0.0});

if ((x > T{1.0}) && (x > a)) return (T{1.0} - igamc(a, x));

/* Compute x**a * exp(-x) / gamma(a) */
T ax = a * log(x) - x - std::lgamma(a);
if (ax < -MAXLOG) {
return (T{0.0});
}
ax = exp(ax);

/* power series */
T r = a;
T c = T{1.0};
T ans = T{1.0};

do {
r += T{1.0};
c *= x / r;
ans += c;
} while (c / ans > MACHEP);

return (ans * ax / a);
}

template <typename T>
HOSTDEVICE T igamc(const T a, const T x) {
static T big = 4.503599627370496e15;
static T biginv = 2.22044604925031308085e-16;

if ((x <= T{0}) || (a <= T{0})) return (T{1.0});

if ((x < T{1.0}) || (x < a)) return (T{1.0} - igam(a, x));

T ax = a * log(x) - x - std::lgamma(a);
if (ax < -MAXLOG) {
return (T{0.0});
}
ax = exp(ax);

/* continued fraction */
T y = T{1.0} - a;
T z = x + y + T{1.0};
T c = T{0.0};
T pkm2 = T{1.0};
T qkm2 = x;
T pkm1 = x + T{1.0};
T qkm1 = z * x;
T ans = pkm1 / qkm1;
T t;
do {
c += T{1.0};
y += T{1.0};
z += T{2.0};
T yc = y * c;
T pk = pkm1 * z - pkm2 * yc;
T qk = qkm1 * z - qkm2 * yc;
if (qk != T{0}) {
T r = pk / qk;
t = fabs((ans - r) / r);
ans = r;
} else {
t = T{1.0};
}
pkm2 = pkm1;
pkm1 = pk;
qkm2 = qkm1;
qkm1 = qk;
if (fabs(pk) > big) {
pkm2 *= biginv;
pkm1 *= biginv;
qkm2 *= biginv;
qkm1 *= biginv;
}
} while (t > MACHEP);

return (ans * ax);
}

template <typename T>
struct IgammaFunctor {
IgammaFunctor(const T* x, const T* a, T* output, int64_t numel)
: x_(x), a_(a), output_(output), numel_(numel) {}

// out = Q(a, x), evaluated with the igamc helper above.
HOSTDEVICE void operator()(int64_t idx) const {
using MT = typename phi::dtype::MPTypeTrait<T>::Type;
const MT mp_x = static_cast<MT>(x_[idx]);
const MT mp_a = static_cast<MT>(a_[idx]);
output_[idx] = static_cast<T>(igamc<MT>(mp_a, mp_x));
}

private:
const T* x_;
const T* a_;
T* output_;
int64_t numel_;
};

template <typename T, typename Context>
void IgammaKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& a,
DenseTensor* out) {
auto numel = x.numel();
auto* x_data = x.data<T>();
auto* a_data = a.data<T>();
auto* out_data = dev_ctx.template Alloc<T>(out);
phi::funcs::ForRange<Context> for_range(dev_ctx, numel);
IgammaFunctor<T> functor(x_data, a_data, out_data, numel);
for_range(functor);
}
} // namespace phi
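
As a quick sanity check on the power-series branch of igam, here is a minimal standalone sketch (not part of the PR; the function name igam_series and the test values are illustrative). For a = 1 the series has the closed form P(1, x) = 1 − e^(−x), so the recurrence can be checked directly. The PR version additionally switches to the continued fraction when x > 1 and x > a; this sketch omits that branch, since the plain series still converges there, only more slowly.

// Standalone sketch: the same power-series recurrence as igam, specialized
// to double, checked against P(1, x) = 1 - exp(-x).
#include <cmath>
#include <cstdio>

double igam_series(double a, double x) {
  if (x <= 0.0 || a <= 0.0) return 0.0;
  // x^a * exp(-x) / Gamma(a), computed in log space to avoid overflow.
  double ax = std::exp(a * std::log(x) - x - std::lgamma(a));
  // ans accumulates sum_{n>=0} x^n / ((a+1)(a+2)...(a+n)).
  double r = a, c = 1.0, ans = 1.0;
  do {
    r += 1.0;
    c *= x / r;
    ans += c;
  } while (c / ans > 1.11022302462515654042e-16);  // same cutoff as MACHEP
  return ans * ax / a;
}

int main() {
  const double x = 2.5;
  const double got = igam_series(1.0, x);
  const double expect = 1.0 - std::exp(-x);
  std::printf("P(1, %.1f) = %.15f, expected %.15f\n", x, got, expect);
  return 0;
}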