Skip to content

Commit

Permalink
Merge pull request #68 from dflemin3/dev
Browse files Browse the repository at this point in the history
Fixing Issue 67
  • Loading branch information
David Fleming committed May 5, 2020
2 parents f61390f + ea1ba33 commit f9df967
Show file tree
Hide file tree
Showing 2 changed files with 54 additions and 48 deletions.
91 changes: 44 additions & 47 deletions approxposterior/approx.py
Expand Up @@ -30,51 +30,53 @@ class ApproxPosterior(object):
"""
Class used to estimate approximate Bayesian posterior distributions or
perform Bayesian optimization using a Gaussian process surrogate model
Initial parameters:
Parameters
----------
theta : array-like
Input features (n_samples x n_features). Defaults to None.
y : array-like
Input result of forward model (n_samples,). Defaults to None.
lnprior : function
Defines the log prior over the input features.
lnlike : function
Defines the log likelihood function. In this function, it is assumed
that the forward model is evaluated on the input theta and the output
is used to evaluate the log likelihood.
priorSample : function
Method to randomly sample points over region allowed by prior
bounds : tuple/iterable
Hard bounds for parameters
gp : george.GP, optional
Gaussian Process that learns the likelihood conditioned on forward
model input-output pairs (theta, y). It's recommended that users
        specify their own kernel and GP using george. If None is provided,
        then approxposterior initializes a GP with a single ExpSquaredKernel,
        as these work well in practice.
algorithm : str, optional
Point selection algorithm that specifies which utility (also
referred to as acquisition) function to use. Defaults to bape.
Options are bape (Bayesian Active Learning for Posterior Estimation,
Kandasamy et al. (2015)), agp (Adapted Gaussian Process Approximation,
Wang & Li (2017)), alternate (between AGP and BAPE), and jones
(Jones et al. (1998) expected improvement).
Case doesn't matter. If alternate, runs agp on even numbers and bape
on odd.
        For approximate Bayesian posterior estimation, bape or alternate
        are typically the best options. For Bayesian optimization,
        jones (expected improvement) usually performs best.
Returns
-------
"""

def __init__(self, theta, y, lnprior, lnlike, priorSample, bounds, gp=None,
algorithm="bape"):
"""
Initializer.
Parameters
----------
theta : array-like
Input features (n_samples x n_features). Defaults to None.
y : array-like
Input result of forward model (n_samples,). Defaults to None.
lnprior : function
Defines the log prior over the input features.
lnlike : function
Defines the log likelihood function. In this function, it is assumed
that the forward model is evaluated on the input theta and the output
is used to evaluate the log likelihood.
priorSample : function
Method to randomly sample points over region allowed by prior
bounds : tuple/iterable
Hard bounds for parameters
gp : george.GP, optional
Gaussian Process that learns the likelihood conditioned on forward
model input-output pairs (theta, y). It's recommended that users
            specify their own kernel and GP using george. If None is provided,
            then approxposterior initializes a GP with a single ExpSquaredKernel,
            as these work well in practice.
algorithm : str, optional
Point selection algorithm that specifies which utility (also
referred to as acquisition) function to use. Defaults to bape.
Options are bape (Bayesian Active Learning for Posterior Estimation,
Kandasamy et al. (2015)), agp (Adapted Gaussian Process Approximation,
Wang & Li (2017)), alternate (between AGP and BAPE), and jones
(Jones et al. (1998) expected improvement).
Case doesn't matter. If alternate, runs agp on even numbers and bape
on odd.
            For approximate Bayesian posterior estimation, bape or alternate
            are typically the best options. For Bayesian optimization,
            jones (expected improvement) usually performs best.
Returns
-------
"""

# Need to supply the training set
Expand Down Expand Up @@ -489,12 +491,8 @@ def run(self, m=10, nmax=2, seed=None, timing=False, verbose=True,
else:
# Compute z score for each parameter mean relative to
# previous approximate marginal posterior distribution quantities
zScores = np.fabs((meanNN - meanPrev)/stdPrev)

# Save current zScore
self.marginalZScores.append(zScores)

if np.all(zScores < eps):
zScore = np.fabs((meanNN - meanPrev)/stdPrev)
if np.all(zScore < eps):
kk += 1
else:
kk = 0
Expand All @@ -513,14 +511,13 @@ def run(self, m=10, nmax=2, seed=None, timing=False, verbose=True,
kmax=kmax,
finalIteration=kk)


# If close for kmax consecutive iterations, converged!
if kk >= kmax:
if verbose:
print("Approximate marginal posterior distributions converged.")
print("Delta zScore threshold, eps: %e" % eps)
print("kk, kmax: %d, %d" % (kk, kmax))
print("Final abs(zScore):", zScores)
print("Final abs(zScore):", zScore)
break
# end function

Expand Down
11 changes: 10 additions & 1 deletion doc/conf.py
Expand Up @@ -56,7 +56,16 @@
'nbsphinx'
]

nbsphinx_prompt_width = 0
nbsphinx_prolog = """
.. raw:: html
<style>
.nbinput .prompt,
.nboutput .prompt {
display: none;
}
</style>
"""

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
Expand Down

0 comments on commit f9df967

Please sign in to comment.