Update documentation to render nicely in sphinx
dsteinberg committed Aug 7, 2017
1 parent 51df614 commit d51163f
Showing 12 changed files with 335 additions and 132 deletions.
14 changes: 7 additions & 7 deletions AUTHORS.rst
@@ -1,17 +1,17 @@
-Credits
+Authors
 =======
 
 Development Leads
 -----------------
 
-Daniel Steinberg
-Lachlan McCalman
-Louis Tiao
+* Daniel Steinberg
+* Lachlan McCalman
+* Louis Tiao
 
 
 Contributors
 ------------
 
-Simon O'Callaghan
-Alistair Reid
-Joyce Wang
+* Simon O'Callaghan
+* Alistair Reid
+* Joyce Wang
24 changes: 6 additions & 18 deletions README.rst
@@ -12,23 +12,8 @@ Aboleth
    :align: center
 
 
-*A bare-bones TensorFlow framework for supervised Bayesian deep learning with
-stochastic gradient variational Bayes (SGVB, Kingma and Welling 2014).*
-
-
-Dependencies
-------------
-
-Minimal:
-
-- numpy
-- scipy
-- tensorflow
-
-Demos:
-
-- bokeh
-- scikit-learn
+*A bare-bones TensorFlow framework for supervised Bayesian deep learning and
+Gaussian process approximation with stochastic gradient variational Bayes.*
 
 
 Installation
@@ -57,4 +42,7 @@ folder for some examples of creating and training algorithms with Aboleth.
 References
 ----------
 
-Kingma, D. P. and Welling, M. Auto-encoding variational Bayes. In ICLR, 2014.
+.. [1] Kingma, D. P. and Welling, M. Auto-encoding variational Bayes. In ICLR,
+   2014.
+.. [2] Cutajar, K. Bonilla, E. Michiardi, P. Filippone, M. Random Feature
+   Expansions for Deep Gaussian Processes. In ICML, 2017.
42 changes: 32 additions & 10 deletions aboleth/distributions.py
@@ -30,9 +30,10 @@ class Normal(ParameterDistribution):
     Parameters
     ----------
     mu : Tensor
-        mean, shape [d_i, d_o]
+        mean, shape (d_in, d_out)
     var : Tensor
-        variance, shape [d_i, d_o]
+        variance, shape (d_in, d_out)
+
     """
 
     def __init__(self, mu=0., var=1.):
@@ -43,7 +44,14 @@ def __init__(self, mu=0., var=1.):
         self.d = tf.shape(mu)
 
     def sample(self):
-        """Draw a random sample from this object."""
+        """Draw a random sample from this object.
+
+        Returns
+        -------
+        x : Tensor
+            a sample of shape (d_in, d_out).
+
+        """
         # Reparameterisation trick
         e = tf.random_normal(self.d, seed=next(seedgen))
         x = self.mu + e * self.sigma
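
Aside: ``sample`` above uses the reparameterisation trick, writing a draw
from N(mu, var) as a deterministic transform of parameter-free noise so
that gradients can flow through ``mu`` and ``sigma``. A minimal NumPy
sketch of the same idea (the dimensions here are hypothetical, not from
this commit)::

    import numpy as np

    rng = np.random.RandomState(42)

    d_in, d_out = 3, 2                      # hypothetical layer shape
    mu = np.zeros((d_in, d_out))            # mean, shape (d_in, d_out)
    sigma = np.ones((d_in, d_out))          # std. deviation, sqrt(var)

    e = rng.standard_normal((d_in, d_out))  # noise with no free parameters
    x = mu + e * sigma                      # reparameterised sample
    assert x.shape == (d_in, d_out)
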
@@ -57,9 +65,10 @@ class Gaussian(ParameterDistribution):
     Parameters
     ----------
     mu : Tensor
-        mean, shape [d_i, d_o]
+        mean, shape (d_in, d_out)
     L : Tensor
-        Cholesky of the covariance matrix, shape [d_o, d_i, d_i]
+        Cholesky of the covariance matrix, shape (d_out, d_in, d_in)
+
     """
 
     def __init__(self, mu, L):
@@ -69,7 +78,14 @@ def __init__(self, mu, L):
         self.d = tf.shape(mu)
 
     def sample(self):
-        """Construct a Normal distribution object."""
+        """Draw a random sample from this object.
+
+        Returns
+        -------
+        x : Tensor
+            a sample of shape (d_in, d_out).
+
+        """
         # Reparameterisation trick
         mu = self.transform_w(self.mu)
         e = tf.random_normal(tf.shape(mu), seed=next(seedgen))
@@ -78,13 +94,13 @@ def sample(self):
 
     @staticmethod
     def transform_w(w):
-        """Transform a weight matrix, [d_i, d_o] -> [d_o, d_i, 1]."""
+        """Transform a weight matrix, (d_in, d_out) -> (d_out, d_in, 1)."""
         wt = tf.expand_dims(tf.transpose(w), 2)  # O x I x 1
         return wt
 
     @staticmethod
     def itransform_w(wt):
-        """Un-transform a weight matrix, [d_o, d_i, 1] -> [d_i, d_o]."""
+        """Un-transform a weight matrix, (d_out, d_in, 1) -> (d_in, d_out)."""
         w = tf.transpose(wt[:, :, 0])
         return w
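
Aside: the shape convention in ``transform_w``/``itransform_w`` is easiest
to see as a round trip. A NumPy sketch of the same transpose/expand
operations (array values hypothetical)::

    import numpy as np

    w = np.arange(6.).reshape(3, 2)  # weight matrix, (d_in=3, d_out=2)
    wt = np.expand_dims(w.T, 2)      # transform_w:  -> (d_out, d_in, 1)
    w_back = wt[:, :, 0].T           # itransform_w: -> (d_in, d_out)

    assert wt.shape == (2, 3, 1)
    assert np.array_equal(w, w_back)
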

@@ -105,8 +121,9 @@ def norm_prior(dim, var):
 
     Returns
     -------
-    Q : Normal
+    P : Normal
         the initialised prior Normal object.
+
     """
     mu = tf.zeros(dim)
     var = pos(tf.Variable(var, name="W_mu_p"))
@@ -128,6 +145,7 @@ def norm_posterior(dim, var0):
     -------
     Q : Normal
         the initialised posterior Normal object.
+
     """
     mu_0 = tf.random_normal(dim, stddev=np.sqrt(var0), seed=next(seedgen))
     mu = tf.Variable(mu_0, name="W_mu_q")
@@ -156,6 +174,7 @@ def gaus_posterior(dim, var0):
     -------
     Q : Gaussian
         the initialised posterior Gaussian object.
+
     """
     I, O = dim
     sig0 = np.sqrt(var0)
@@ -195,6 +214,7 @@ def kl_qp(q, p):
     -------
     KL : Tensor
         the result of KL[q||p].
+
     """
     KL = 0.5 * (tf.log(p.var) - tf.log(q.var) + q.var / p.var - 1. +
                 (q.mu - p.mu)**2 / p.var)
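
Aside: the expression above is the closed-form KL between independent
Normals, KL[q||p] = 0.5 * (log(vp) - log(vq) + vq/vp - 1 + (mq - mp)^2 / vp),
summed over elements. A NumPy check of that formula (not part of this
commit)::

    import numpy as np

    def kl_normal_normal(mq, vq, mp, vp):
        """Elementwise KL[q || p] for independent Normals, summed."""
        kl = 0.5 * (np.log(vp) - np.log(vq) + vq / vp - 1. +
                    (mq - mp)**2 / vp)
        return np.sum(kl)

    assert np.isclose(kl_normal_normal(0., 1., 0., 1.), 0.)  # q == p
    assert kl_normal_normal(0., 2., 0., 1.) > 0.             # q wider than p
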
@@ -217,6 +237,7 @@ def kl_qp(q, p):
     -------
     KL : Tensor
         the result of KL[q||p].
+
     """
     D, n = tf.to_float(q.d[0]), tf.to_float(q.d[1])
     tr = tf.reduce_sum(q.L * q.L) / p.var
@@ -241,6 +262,7 @@ def kl_qp(q, p):
     -------
     KL : Tensor
         the result of KL[q||p].
+
     """
     D, n = tf.to_float(q.d[0]), tf.to_float(q.d[1])
     qCipC = tf.cholesky_solve(p.L, tf.matmul(q.L, q.L, transpose_b=True))
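
Aside: this overload computes the standard multivariate-Normal KL,
KL[q||p] = 0.5 * (tr(Cp^-1 Cq) + (mp - mq)^T Cp^-1 (mp - mq) - D
+ log|Cp| - log|Cq|), working from the Cholesky factors. A dense NumPy
sketch of the same quantity (inputs hypothetical, no Cholesky shortcuts)::

    import numpy as np

    def kl_mvn(mq, Cq, mp, Cp):
        """KL[q || p] between multivariate Normals with dense covariances."""
        D = mq.shape[0]
        Cp_inv = np.linalg.inv(Cp)
        d = mp - mq
        return 0.5 * (np.trace(Cp_inv @ Cq) + d @ Cp_inv @ d - D
                      + np.log(np.linalg.det(Cp))
                      - np.log(np.linalg.det(Cq)))

    C = np.array([[2., .5],
                  [.5, 1.]])
    m = np.zeros(2)
    assert np.isclose(kl_mvn(m, C, m, C), 0.)  # identical distributions
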
@@ -257,7 +279,7 @@ def kl_qp(q, p):
 #
 
 def _chollogdet(L):
-    """Log det of a cholesky, where L is [..., D, D]."""
+    """Log det of a cholesky, where L is (..., D, D)."""
     l = tf.maximum(tf.matrix_diag_part(L), 1e-15)  # Make sure we don't go to 0
     logdet = 2. * tf.reduce_sum(tf.log(l))
     return logdet
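
Aside: ``_chollogdet`` relies on the identity that for C = L L^T with L
lower triangular, log|C| = 2 * sum(log(diag(L))). A quick NumPy check
(matrix values hypothetical)::

    import numpy as np

    C = np.array([[2., 1.],
                  [1., 2.]])                   # symmetric positive definite
    L = np.linalg.cholesky(C)                  # C = L @ L.T, L lower triangular
    logdet = 2. * np.sum(np.log(np.diag(L)))
    assert np.isclose(logdet, np.log(np.linalg.det(C)))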