Feature/edward stats #238

Merged · 3 commits · Aug 29, 2016
docs/source/data.rst (4 changes: 2 additions & 2 deletions)

@@ -15,8 +15,8 @@ We detail specifics for each modeling language below.
 class BetaBernoulli:
   def log_prob(self, xs, zs):
     log_prior = beta.logpdf(zs['p'], a=1.0, b=1.0)
-    log_lik = tf.pack([tf.reduce_sum(bernoulli.logpmf(xs['x'], z))
-                       for z in tf.unpack(zs['p'])])
+    log_lik = tf.pack([tf.reduce_sum(bernoulli.logpmf(xs['x'], p=p))
+                       for p in tf.unpack(zs['p'])])
     return log_lik + log_prior

 model = BetaBernoulli()
docs/source/models.rst (8 changes: 4 additions & 4 deletions)

@@ -28,11 +28,11 @@ evaluation for each set of latent variables. Here is an example:
 from edward.stats import bernoulli, beta

 class BetaBernoulli:
-  """p(x, z) = Bernoulli(x | z) * Beta(z | 1, 1)"""
+  """p(x, p) = Bernoulli(x | p) * Beta(p | 1, 1)"""
   def log_prob(self, xs, zs):
     log_prior = beta.logpdf(zs['p'], a=1.0, b=1.0)
-    log_lik = tf.pack([tf.reduce_sum(bernoulli.logpmf(xs['x'], z))
-                       for z in tf.unpack(zs['p'])])
+    log_lik = tf.pack([tf.reduce_sum(bernoulli.logpmf(xs['x'], p=p))
+                       for p in tf.unpack(zs['p'])])
     return log_lik + log_prior

 model = BetaBernoulli()

@@ -65,7 +65,7 @@ Here is an example:
 from scipy.stats import bernoulli, beta

 class BetaBernoulli(PythonModel):
-  """p(x, z) = Bernoulli(x | z) * Beta(z | 1, 1)"""
+  """p(x, p) = Bernoulli(x | p) * Beta(p | 1, 1)"""
   def _py_log_prob(self, xs, zs):
     # This example is written for pedagogy. We recommend
     # vectorizing operations in practice.
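With these documentation updates, `bernoulli.logpmf` takes the success probability as the named argument `p` rather than an unnamed second argument, and the latent variable is consistently called `p`. A minimal sketch of the updated call pattern, assuming the `edward.stats` signatures shown in the hunks above (the toy tensors are illustrative, not from the docs):

```python
import tensorflow as tf
from edward.stats import bernoulli, beta

# Illustrative binary observations and two candidate probabilities.
x = tf.constant([0.0, 1.0, 1.0, 0.0, 1.0])
p_samples = tf.constant([0.3, 0.7])

# One log-joint value per sample of p, mirroring the docs' pattern:
# the probability is passed as the keyword argument p.
log_prior = beta.logpdf(p_samples, a=1.0, b=1.0)
log_lik = tf.pack([tf.reduce_sum(bernoulli.logpmf(x, p=p))
                   for p in tf.unpack(p_samples)])
log_joint = log_lik + log_prior
```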
docs/tex/getting-started.tex (2 changes: 1 addition & 1 deletion)

@@ -49,7 +49,7 @@ \subsubsection{Your first Edward program}
     # Specify the likelihood. Its mean is the output of a neural
     # network taking `x` as input with weights `zs`.
     mus = self.neural_network(x, zs)
-    log_lik = tf.reduce_sum(norm.logpdf(y, loc=mus, scale=1), 1)
+    log_lik = tf.reduce_sum(norm.logpdf(y, mu=mus, sigma=1.0), 1)
     return log_prior + log_lik
 \end{lstlisting}
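The getting-started example now passes the Gaussian parameters as `mu` and `sigma` instead of the SciPy-style `loc` and `scale`. A small sketch of the renamed keywords, assuming the `edward.stats.norm` signature used in the hunk above (values are illustrative):

```python
import tensorflow as tf
from edward.stats import norm

y = tf.constant([0.5, -1.2, 2.0])
mus = tf.zeros([3])

# Mean as mu, standard deviation as sigma.
log_lik = tf.reduce_sum(norm.logpdf(y, mu=mus, sigma=1.0))
```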
docs/tex/tut_gp_classification.tex (4 changes: 2 additions & 2 deletions)

@@ -65,7 +65,7 @@ \subsection{Gaussian process classification}
 Gaussian process classification

 p((x,y), z) = Bernoulli(y | logit^{-1}(x*z)) *
-  Normal(z | 0, K),
+  Normal(z | 0, K),

 where z are weights drawn from a GP with covariance given by k(x,
 x') for each pair of inputs (x, x'), and with squared-exponential

@@ -109,7 +109,7 @@ \subsection{Gaussian process classification}
     x, y = xs['x'], xs['y']
     log_prior = multivariate_normal.logpdf(zs['z'], cov=self.kernel(x))
     log_lik = tf.pack([tf.reduce_sum(
-        bernoulli.logpmf(y, self.inverse_link(tf.mul(y, z))))
+        bernoulli.logpmf(y, p=self.inverse_link(tf.mul(y, z))))
         for z in tf.unpack(zs['z'])])
     return log_prior + log_lik
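For reference, a minimal sketch of the log-joint pattern in the classification tutorials after this change, assuming the `edward.stats` signatures shown above; the labels, kernel matrix, and weight samples are illustrative stand-ins rather than the tutorial's data:

```python
import tensorflow as tf
from edward.stats import bernoulli, multivariate_normal

y = tf.constant([1.0, -1.0, 1.0])       # illustrative labels in {-1, +1}
K = tf.constant([[1.0, 0.1, 0.1],       # illustrative stand-in for k(x, x')
                 [0.1, 1.0, 0.1],
                 [0.1, 0.1, 1.0]])
z_samples = tf.random_normal([2, 3])    # two samples of the GP weights

def inverse_link(t):
  return tf.sigmoid(t)  # logit^{-1}

# GP prior on the weights plus a Bernoulli likelihood per sample of z,
# with the probability passed as the keyword argument p.
log_prior = multivariate_normal.logpdf(z_samples, cov=K)
log_lik = tf.pack([tf.reduce_sum(bernoulli.logpmf(y, p=inverse_link(tf.mul(y, z))))
                   for z in tf.unpack(z_samples)])
log_joint = log_prior + log_lik
```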
docs/tex/tut_latent_space_models.tex (2 changes: 1 addition & 1 deletion)

@@ -104,7 +104,7 @@ \subsubsection{Model}
     xp = tf.matmul(z, z, transpose_b=True)

     if self.like == 'Gaussian':
-      log_lik = tf.reduce_sum(norm.logpdf(xs['x'], xp))
+      log_lik = tf.reduce_sum(norm.logpdf(xs['x'], xp, 1.0))
     elif self.like == 'Poisson':
       if not (self.dist == 'euclidean' or self.prior == "Lognormal"):
         raise NotImplementedError("Rate of Poisson has to be nonnegatve.")
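In the latent space model the Gaussian log-density now gets its scale explicitly as a positional third argument. A tiny sketch of that ordering, assuming `norm.logpdf(x, mu, sigma)` as used in the hunk above (tensors are illustrative):

```python
import tensorflow as tf
from edward.stats import norm

x = tf.constant([[0.2, 1.1], [1.1, 0.4]])   # illustrative observed matrix
xp = tf.constant([[0.0, 1.0], [1.0, 0.0]])  # illustrative mean, e.g. z z^T

# Positional arguments: observation, mean, standard deviation.
log_lik = tf.reduce_sum(norm.logpdf(x, xp, 1.0))
```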
docs/tex/tut_mixture_gaussian.tex (8 changes: 4 additions & 4 deletions)

@@ -108,17 +108,17 @@ \subsection{Mixture of Gaussians}
     self.D = D
     self.n_vars = (2 * D + 1) * K

-    self.a = 1
-    self.b = 1
-    self.c = 10
+    self.a = 1.0
+    self.b = 1.0
+    self.c = 3.0
     self.alpha = tf.ones([K])

   def log_prob(self, xs, zs):
     """Return a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
     x = xs['x']
     pi, mus, sigmas = zs['pi'], zs['mu'], zs['sigma']
     log_prior = dirichlet.logpdf(pi, self.alpha)
-    log_prior += tf.reduce_sum(norm.logpdf(mus, 0, np.sqrt(self.c)), 1)
+    log_prior += tf.reduce_sum(norm.logpdf(mus, 0.0, self.c), 1)
     log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b), 1)

     # Loop over each sample zs[s, :].
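Reading the hunk above, the hyperparameters become floats and the prior on the component means now uses `c` directly as the standard deviation (`norm.logpdf(mus, 0.0, self.c)` with `c = 3.0`) instead of `np.sqrt(c)` with `c = 10`, so the prior scale stays roughly the same (sqrt(10) ≈ 3.16 versus 3.0). A minimal sketch of the updated prior term, assuming the `edward.stats` signatures shown above (shapes are illustrative):

```python
import tensorflow as tf
from edward.stats import norm

c = 3.0
mus = tf.random_normal([5, 6])  # illustrative: 5 samples of K * D = 6 mean parameters

# Normal(0, c) prior on the means, with c the standard deviation,
# summed over parameters to give one value per sample.
log_prior_mu = tf.reduce_sum(norm.logpdf(mus, 0.0, c), 1)
```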
docs/tex/tut_supervised_classification.tex (4 changes: 2 additions & 2 deletions)

@@ -39,7 +39,7 @@ \subsubsection{Model}
 Gaussian process classification

 p((x,y), z) = Bernoulli(y | logit^{-1}(x*z)) *
-  Normal(z | 0, K),
+  Normal(z | 0, K),

 where z are weights drawn from a GP with covariance given by k(x,
 x') for each pair of inputs (x, x'), and with squared-exponential

@@ -83,7 +83,7 @@ \subsubsection{Model}
     x, y = xs['x'], xs['y']
     log_prior = multivariate_normal.logpdf(zs['z'], cov=self.kernel(x))
     log_lik = tf.pack([tf.reduce_sum(
-        bernoulli.logpmf(y, self.inverse_link(tf.mul(y, z))))
+        bernoulli.logpmf(y, p=self.inverse_link(tf.mul(y, z))))
         for z in tf.unpack(zs['z'])])
     return log_prior + log_lik
docs/tex/tut_unsupervised.tex (8 changes: 4 additions & 4 deletions)

@@ -72,17 +72,17 @@ \subsubsection{Model}
     self.D = D
     self.n_vars = (2 * D + 1) * K

-    self.a = 1
-    self.b = 1
-    self.c = 10
+    self.a = 1.0
+    self.b = 1.0
+    self.c = 3.0
     self.alpha = tf.ones([K])

   def log_prob(self, xs, zs):
     """Return a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
     x = xs['x']
     pi, mus, sigmas = zs['pi'], zs['mu'], zs['sigma']
     log_prior = dirichlet.logpdf(pi, self.alpha)
-    log_prior += tf.reduce_sum(norm.logpdf(mus, 0, np.sqrt(self.c)), 1)
+    log_prior += tf.reduce_sum(norm.logpdf(mus, 0.0, self.c), 1)
     log_prior += tf.reduce_sum(invgamma.logpdf(sigmas, self.a, self.b), 1)

     # Loop over each sample zs[s, :].
edward/inferences.py (2 changes: 1 addition & 1 deletion)

@@ -103,7 +103,7 @@ def __init__(self, latent_vars, data=None, model_wrapper=None):
         # If ``data`` has tensors that are the output of
         # data readers, then batch training operates
         # according to the reader.
-        self.data[key] = value
+        self.data[key] = tf.cast(value, tf.float32)
       elif isinstance(value, np.ndarray):
         # If ``data`` has NumPy arrays, store the data
         # in the computational graph.
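The change to `edward/inferences.py` casts data-reader tensors to `tf.float32` before they are stored, so downstream floating-point ops see a consistent dtype. A minimal sketch of the effect, using an integer tensor as a stand-in for a reader's output:

```python
import tensorflow as tf

# Stand-in for a tensor produced by a data reader (often integer-typed).
value = tf.constant([0, 1, 1, 0])

# What the updated inference code does before storing the tensor.
value = tf.cast(value, tf.float32)  # value.dtype is now float32
```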