-
Notifications
You must be signed in to change notification settings - Fork 1.4k
/
pareto.py
119 lines (96 loc) · 3.26 KB
/
pareto.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
import chainer
from chainer.backends import cuda
from chainer import distribution
from chainer.functions.array import where
from chainer.functions.math import exponential
from chainer import utils
from chainer.utils import cache
class Pareto(distribution.Distribution):
    """Pareto distribution.

    .. math::
        f(x) = \\alpha x_m^{\\alpha}(x)^{-(\\alpha+1)},

    Args:
        scale(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
            distribution :math:`x_m`.
        alpha(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
            distribution :math:`\\alpha`.
    """

    def __init__(self, scale, alpha):
        super(Pareto, self).__init__()
        # Raw parameters are kept as given; the cached properties below
        # wrap them as Variables on first access.
        self.__scale = scale
        self.__alpha = alpha

    @cache.cached_property
    def scale(self):
        # Lazily promote the raw scale parameter to a Variable.
        return chainer.as_variable(self.__scale)

    @cache.cached_property
    def alpha(self):
        # Lazily promote the raw alpha parameter to a Variable.
        return chainer.as_variable(self.__alpha)

    @cache.cached_property
    def _log_scale(self):
        # log(x_m), shared by entropy, log_prob and the KL registration.
        return exponential.log(self.scale)

    @cache.cached_property
    def _log_alpha(self):
        # log(alpha), shared by entropy, log_prob and the KL registration.
        return exponential.log(self.alpha)

    @property
    def batch_shape(self):
        return self.scale.shape

    @cache.cached_property
    def entropy(self):
        # H = log(x_m) - log(alpha) + 1/alpha + 1
        return self._log_scale - self._log_alpha \
            + 1. / self.alpha + 1.

    @property
    def event_shape(self):
        # Scalar (univariate) distribution.
        return ()

    @property
    def _is_gpu(self):
        return isinstance(self.scale.data, cuda.ndarray)

    def log_prob(self, x):
        """Return log-density at ``x``; ``-inf`` where ``x`` is below scale."""
        x = chainer.as_variable(x)
        unbounded = self._log_alpha \
            + self.alpha * self._log_scale \
            - (self.alpha + 1) * exponential.log(x)
        xp = unbounded.xp
        in_support = utils.force_array(x.data >= self.scale.data)
        return where.where(
            in_support, unbounded, xp.array(-xp.inf, unbounded.dtype))

    @cache.cached_property
    def mean(self):
        # Mean is finite only for alpha > 1; +inf otherwise.
        finite_mean = self.alpha * self.scale / (self.alpha - 1)
        xp = finite_mean.xp
        return where.where(
            self.alpha.data > 1,
            finite_mean, xp.array(xp.inf, finite_mean.dtype))

    @property
    def params(self):
        return {'scale': self.scale, 'alpha': self.alpha}

    def sample_n(self, n):
        """Draw ``n`` independent samples per batch element."""
        xp = cuda.get_array_module(self.scale)
        sample_shape = (n,) + self.batch_shape
        # numpy/cupy ``random.pareto`` samples the Lomax (Pareto II) form,
        # so shift by 1 and rescale to get the classical Pareto.
        if xp is cuda.cupy:
            raw = xp.random.pareto(
                self.alpha.data, sample_shape, dtype=self.alpha.dtype)
        else:
            raw = xp.random.pareto(
                self.alpha.data, sample_shape
            ).astype(self.alpha.dtype)
        return self.scale * (raw + 1)

    @property
    def support(self):
        return '[scale, inf]'

    @cache.cached_property
    def variance(self):
        # Variance is finite only for alpha > 2; +inf otherwise.
        finite_var = self.scale ** 2 * self.alpha / (self.alpha - 1) ** 2 \
            / (self.alpha - 2)
        xp = finite_var.xp
        return where.where(
            self.alpha.data > 2,
            finite_var, xp.array(xp.inf, finite_var.dtype))
@distribution.register_kl(Pareto, Pareto)
def _kl_pareto_pareto(dist1, dist2):
    """KL divergence between two Pareto distributions.

    Finite only when ``dist1.scale >= dist2.scale`` (i.e. the support of
    ``dist1`` lies inside the support of ``dist2``); ``+inf`` otherwise.
    """
    scale_term = dist2.alpha * (dist1._log_scale - dist2._log_scale)
    alpha_term = dist1._log_alpha - dist2._log_alpha
    ratio_term = (dist2.alpha - dist1.alpha) / dist1.alpha
    divergence = scale_term + alpha_term + ratio_term
    xp = divergence.xp
    return where.where(
        dist1.scale.data >= dist2.scale.data,
        divergence, xp.array(xp.inf, divergence.dtype))