Merge 5b28e26 into c9fb8b8

ganow committed Apr 1, 2019
2 parents c9fb8b8 + 5b28e26
commit e47fbfdd2cb003535dc68af8d58d8d18f9ac762e
chainer/distributions/__init__.py
@@ -10,6 +10,7 @@
from chainer.distributions.gamma import Gamma # NOQA
from chainer.distributions.geometric import Geometric # NOQA
from chainer.distributions.gumbel import Gumbel # NOQA
from chainer.distributions.independent import Independent # NOQA
from chainer.distributions.laplace import Laplace # NOQA
from chainer.distributions.log_normal import LogNormal # NOQA
from chainer.distributions.multivariate_normal import MultivariateNormal # NOQA
chainer/distributions/bernoulli.py
@@ -131,6 +131,10 @@ def log_prob(self, x):
def mean(self):
return self.p

@property
def params(self):
return {'logit': self.logit}

def prob(self, x):
x = chainer.as_variable(x)
prob = x * self.p + (1 - x) * (1 - self.p)
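
For reference, a minimal usage sketch (not part of this diff) of the new ``params`` property that these hunks add to each distribution. The values are illustrative assumptions, and the constructor call assumes ``Bernoulli`` accepts a ``p`` argument as in current Chainer:

import numpy as np
import chainer

# Hypothetical values; with this change Bernoulli exposes its parameter
# dict as {'logit': ...}, mirroring the hunk above.
p = np.full(3, 0.3, dtype=np.float32)
d = chainer.distributions.Bernoulli(p=p)
print(d.params)  # {'logit': <Variable ...>}
print(d.mean)    # Variable holding p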
chainer/distributions/beta.py
@@ -79,6 +79,10 @@ def log_prob(self, x):
def mean(self):
return self.a / self._a_plus_b

@property
def params(self):
return {'a': self.a, 'b': self.b}

def sample_n(self, n):
xp = backend.get_array_module(self.a)
eps = xp.random.beta(self.a.data, self.b.data, size=(n,)+self.a.shape)
chainer/distributions/categorical.py
@@ -74,6 +74,10 @@ def log_prob(self, x):
else:
return self.log_p[mg + [x.astype(numpy.int32)]]

@property
def params(self):
return {'p': self.p}

def sample_n(self, n):
xp = backend.get_array_module(self.p)
onebyone_p = self.p.data.reshape(-1, self.p.shape[-1])
chainer/distributions/cauchy.py
@@ -80,6 +80,10 @@ def mean(self):
xp = cuda.get_array_module(self.loc)
return chainer.as_variable(xp.full_like(self.loc.data, xp.nan))

@property
def params(self):
return {'loc': self.loc, 'scale': self.scale}

def sample_n(self, n):
xp = cuda.get_array_module(self.loc)
if xp is cuda.cupy:
chainer/distributions/chisquare.py
@@ -56,6 +56,10 @@ def log_prob(self, x):
def mean(self):
return self.k

@property
def params(self):
return {'k': self.k}

def sample_n(self, n):
xp = cuda.get_array_module(self.k)
if xp is cuda.cupy:
chainer/distributions/dirichlet.py
@@ -68,6 +68,10 @@ def mean(self):
alpha0 = expand_dims.expand_dims(self.alpha0, axis=-1)
return self.alpha / alpha0

@property
def params(self):
return {'alpha': self.alpha}

def sample_n(self, n):
obo_alpha = self.alpha.data.reshape(-1, self.event_shape[0])
xp = cuda.get_array_module(self.alpha)
chainer/distributions/exponential.py
@@ -69,6 +69,10 @@ def log_prob(self, x):
def mean(self):
return 1 / self.lam

@property
def params(self):
return {'lam': self.lam}

def sample_n(self, n):
xp = cuda.get_array_module(self.lam)
if xp is cuda.cupy:
chainer/distributions/gamma.py
@@ -63,6 +63,10 @@ def log_prob(self, x):
def mean(self):
return self.k * self.theta

@property
def params(self):
return {'k': self.k, 'theta': self.theta}

def sample_n(self, n):
xp = cuda.get_array_module(self.k)
if xp is cuda.cupy:
chainer/distributions/geometric.py
@@ -47,6 +47,10 @@ def log_prob(self, x):
def mean(self):
return 1 / self.p

@property
def params(self):
return {'p': self.p}

def sample_n(self, n):
xp = cuda.get_array_module(self.p)
if xp is cuda.cupy:
chainer/distributions/gumbel.py
@@ -71,6 +71,10 @@ def log_prob(self, x):
def mean(self):
return self.loc + EULER * self.scale

@property
def params(self):
return {'loc': self.loc, 'scale': self.scale}

def sample_n(self, n):
xp = cuda.get_array_module(self.loc)
if xp is cuda.cupy:
chainer/distributions/independent.py
@@ -0,0 +1,247 @@
import functools
import operator

import numpy

from chainer.backend import cuda
from chainer import distribution
from chainer.functions.array import repeat
from chainer.functions.array import reshape
from chainer.functions.array import transpose
from chainer.functions.math import sum as sum_mod
from chainer.functions.math import prod
from chainer.utils import cache


class Independent(distribution.Distribution):

"""Independent distribution.
Args:
distribution (:class:`~chainer.Distribution`): The base distribution
instance to transform.
reinterpreted_batch_ndims (:class:`int`): Integer number of rightmost
batch dims which will be regarded as event dims. When ``None`` all
but the first batch axis (batch axis 0) will be transferred to
event dimensions.
"""

def __init__(self, distribution, reinterpreted_batch_ndims=None):
super(Independent, self).__init__()
self.__distribution = distribution
if reinterpreted_batch_ndims is None:
reinterpreted_batch_ndims = \
self._get_default_reinterpreted_batch_ndims(distribution)
elif reinterpreted_batch_ndims > len(distribution.batch_shape):
raise ValueError(
'reinterpreted_batch_ndims must be less than or equal to the '
'number of dimensions of `distribution.batch_shape`.')
self.__reinterpreted_batch_ndims = reinterpreted_batch_ndims

batch_ndim = \
len(self.distribution.batch_shape) - self.reinterpreted_batch_ndims
self.__batch_shape = distribution.batch_shape[:batch_ndim]
self.__event_shape = \
distribution.batch_shape[batch_ndim:] + distribution.event_shape

@property
def distribution(self):
return self.__distribution

@property
def reinterpreted_batch_ndims(self):
return self.__reinterpreted_batch_ndims

@property
def batch_shape(self):
return self.__batch_shape

@property
def event_shape(self):
return self.__event_shape

@property
def covariance(self):
'''Returns the covariance of the distribution based on the original
i.i.d. distribution. By definition, the covariance of the new
distribution becomes a block-diagonal matrix. Let
:math:`\\Sigma_{\\mathbf{x}}` be the covariance matrix of the original
random variable :math:`\\mathbf{x} \\in \\mathbb{R}^d`, and let
:math:`\\mathbf{x}^{(1)}, \\mathbf{x}^{(2)}, \\cdots, \\mathbf{x}^{(m)}`
be :math:`m` i.i.d. random variables. Then the covariance matrix
:math:`\\Sigma_{\\mathbf{y}}` of :math:`\\mathbf{y} =
[\\mathbf{x}^{(1)}, \\mathbf{x}^{(2)}, \\cdots, \\mathbf{x}^{(m)}] \\in
\\mathbb{R}^{md}` can be written as

.. math::
    \\left[\\begin{array}{ccc}
    \\Sigma_{\\mathbf{x}^{(1)}} & & 0 \\\\
    & \\ddots & \\\\
    0 & & \\Sigma_{\\mathbf{x}^{(m)}}
    \\end{array} \\right].

Note that this relationship holds only if the covariance matrix of the
original distribution is given analytically.
'''
num_repeat = functools.reduce(
operator.mul,
self.distribution.batch_shape[-self.reinterpreted_batch_ndims:], 1)
dim = functools.reduce(operator.mul, self.distribution.event_shape, 1)
cov = repeat.repeat(
reshape.reshape(
self.distribution.covariance,
((self.batch_shape) + (1, num_repeat, dim, dim))),
num_repeat, axis=-4)
cov = reshape.reshape(
transpose.transpose(
cov, axes=(
tuple(range(len(self.batch_shape))) + (-4, -2, -3, -1))),
self.batch_shape + (num_repeat * dim, num_repeat * dim))
block_indicator = self.xp.reshape(
self._block_indicator,
tuple([1] * len(self.batch_shape)) + self._block_indicator.shape)
return cov * block_indicator

@property
def entropy(self):
return self._reduce(sum_mod.sum, self.distribution.entropy)

def cdf(self, x):
return self._reduce(prod.prod, self.distribution.cdf(x))

def icdf(self, x):
'''The cumulative distribution function of a multivariate variable is
not invertible, so this function always raises :class:`RuntimeError`.

Args:
    x (:class:`~chainer.Variable` or :ref:`ndarray`): Data points in
        the codomain of the distribution.

Raises:
    :class:`RuntimeError`
'''

raise RuntimeError(
'The cumulative distribution function of a multivariate variable '
'is not invertible.')

def log_cdf(self, x):
return self._reduce(sum_mod.sum, self.distribution.log_cdf(x))

def log_prob(self, x):
return self._reduce(sum_mod.sum, self.distribution.log_prob(x))

def log_survival_function(self, x):
return self._reduce(
sum_mod.sum, self.distribution.log_survival_function(x))

@property
def mean(self):
return self.distribution.mean

@property
def mode(self):
return self.distribution.mode

@property
def params(self):
return self.distribution.params

def perplexity(self, x):
return self._reduce(prod.prod, self.distribution.perplexity(x))

def prob(self, x):
return self._reduce(prod.prod, self.distribution.prob(x))

def sample_n(self, n):
return self.distribution.sample_n(n)

@property
def stddev(self):
return self.distribution.stddev

@property
def support(self):
return self.distribution.support

def survival_function(self, x):
return self._reduce(prod.prod, self.distribution.survival_function(x))

@property
def variance(self):
return self.distribution.variance

@property
def xp(self):
return self.distribution.xp

def _reduce(self, op, stat):
range_ = tuple(
(-1 - numpy.arange(self.reinterpreted_batch_ndims)).tolist())
return op(stat, axis=range_)

def _get_default_reinterpreted_batch_ndims(self, distribution):
ndims = len(distribution.batch_shape)
return max(0, ndims - 1)

@cache.cached_property
def _block_indicator(self):
num_repeat = functools.reduce(
operator.mul,
self.distribution.batch_shape[-self.reinterpreted_batch_ndims:], 1)
dim = functools.reduce(operator.mul, self.distribution.event_shape, 1)
block_indicator = numpy.fromfunction(
lambda i, j: i // dim == j // dim,
(num_repeat * dim, num_repeat * dim)).astype(int)
if self.xp is cuda.cupy:
block_indicator = cuda.to_gpu(block_indicator)
return block_indicator


@distribution.register_kl(Independent, Independent)
def _kl_independent_independent(dist1, dist2):
'''Batched KL divergence :math:`\\mathrm{KL}(\\mathrm{dist1} ||
\\mathrm{dist2})` for Independent distributions.

We can leverage the fact that

.. math::
    \\mathrm{KL}(
    \\mathrm{Independent}(\\mathrm{dist1}) ||
    \\mathrm{Independent}(\\mathrm{dist2}))
    = \\mathrm{sum}(\\mathrm{KL}(\\mathrm{dist1} || \\mathrm{dist2}))

where the sum is taken over the ``reinterpreted_batch_ndims``.

Args:
    dist1 (:class:`~chainer.distributions.Independent`): Instance of
        ``Independent``.
    dist2 (:class:`~chainer.distributions.Independent`): Instance of
        ``Independent``.

Returns:
    Batchwise ``KL(dist1 || dist2)``.

Raises:
    :class:`ValueError`: If the event spaces of ``dist1`` and ``dist2``,
        or those of their underlying distributions, do not match.
'''

p = dist1.distribution
q = dist2.distribution

# The KL between any two (non)-batched distributions is a scalar.
# Given that the KL between two factored distributions is the sum, i.e.
# KL(p1(x)p2(y) || q1(x)q2(y)) = KL(p1 || q1) + KL(p2 || q2), we compute
# KL(p || q) and do a `reduce_sum` on the reinterpreted batch dimensions.
if dist1.event_shape == dist2.event_shape:
if p.event_shape == q.event_shape:
num_reduce_dims = len(dist1.event_shape) - len(p.event_shape)
reduce_dims = tuple([-i - 1 for i in range(0, num_reduce_dims)])

return sum_mod.sum(
distribution.kl_divergence(p, q), axis=reduce_dims)
else:
raise NotImplementedError(
'KL between Independents with different '
'event shapes not supported.')
else:
raise ValueError('Event shapes do not match.')
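
To make the semantics of the new ``Independent`` class concrete, here is a minimal usage sketch (not part of this diff), assuming a ``Normal`` base distribution; the shapes follow from the default ``reinterpreted_batch_ndims`` behaviour described in the class docstring:

import numpy as np
from chainer.distributions import Independent, Normal

# Base distribution with batch_shape (4, 3) and scalar events.
loc = np.zeros((4, 3), dtype=np.float32)
scale = np.ones((4, 3), dtype=np.float32)
base = Normal(loc, scale)

# By default, all but the first batch axis is reinterpreted as event dims.
dist = Independent(base)
print(dist.batch_shape)  # (4,)
print(dist.event_shape)  # (3,)

# log_prob sums the base log-densities over the reinterpreted axes,
# so the result has the remaining batch shape.
x = np.zeros((4, 3), dtype=np.float32)
print(dist.log_prob(x).shape)  # (4,)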
chainer/distributions/laplace.py
@@ -115,6 +115,10 @@ def mean(self):
def mode(self):
return self.loc

@property
def params(self):
return {'loc': self.loc, 'scale': self.scale}

def prob(self, x):
scale = self.scale
return 0.5 / scale * exponential.exp(- abs(x - self.loc) / scale)
chainer/distributions/log_normal.py
@@ -65,6 +65,10 @@ def log_prob(self, x):
def mean(self):
return exponential.exp(self.mu + 0.5 * self.sigma ** 2)

@property
def params(self):
return {'mu': self.mu, 'sigma': self.sigma}

def sample_n(self, n):
xp = backend.get_array_module(self.mu)
if xp is cuda.cupy:
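
Finally, a hedged sketch (also not part of this diff) of the KL registration added in ``independent.py`` above, assuming ``chainer.kl_divergence`` and the ``Normal``-``Normal`` KL behave as in current Chainer:

import numpy as np
import chainer
from chainer.distributions import Independent, Normal

# Two Independent distributions with batch_shape (2,) and event_shape (5,).
loc = np.zeros((2, 5), dtype=np.float32)
scale = np.ones((2, 5), dtype=np.float32)
p = Independent(Normal(loc, scale))
q = Independent(Normal(loc + 1.0, scale))

# KL(Normal || Normal) is computed elementwise and summed over the five
# reinterpreted dimensions, leaving one value per remaining batch element.
kl = chainer.kl_divergence(p, q)
print(kl.shape)  # (2,)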
