Skip to content

Commit

Permalink
Merge pull request #1679 from pymc-devs/init_case2
Browse the repository at this point in the history
Make init argument to sample case-insensitive
  • Loading branch information
twiecki committed Jan 19, 2017
2 parents b6827a6 + faab2e1 commit f0786f7
Show file tree
Hide file tree
Showing 9 changed files with 40 additions and 35 deletions.
2 changes: 1 addition & 1 deletion pymc3/examples/arbitrary_stochastic.py
Expand Up @@ -20,7 +20,7 @@ def run(n_samples=3000):
start = model.test_point
h = pm.find_hessian(start, model=model)
step = pm.Metropolis(model.vars, h, blocked=True, model=model)
trace = pm.sample(n_samples, step, start, model=model)
trace = pm.sample(n_samples, step=step, start=start, model=model)
return trace

if __name__ == "__main__":
Expand Down
2 changes: 1 addition & 1 deletion pymc3/examples/factor_potential.py
Expand Up @@ -13,7 +13,7 @@ def run(n=3000):
if n == "short":
n = 50
with model:
trace = sample(n, step, start)
trace = sample(n, step=step, start=start)

if __name__ == '__main__':
run()
31 changes: 18 additions & 13 deletions pymc3/sampling.py
Expand Up @@ -98,20 +98,20 @@ def sample(draws, step=None, init='advi', n_init=200000, start=None,
A step function or collection of functions. If no step methods are
specified, or are partially specified, they will be assigned
automatically (defaults to None).
init : str {'advi', 'advi_map', 'map', 'nuts', None}
init : str {'ADVI', 'ADVI_MAP', 'MAP', 'NUTS', None}
Initialization method to use.
* advi : Run ADVI to estimate starting points and diagonal covariance
* ADVI : Run ADVI to estimate starting points and diagonal covariance
matrix. If njobs > 1 it will sample starting points from the estimated
posterior, otherwise it will use the estimated posterior mean.
* advi_map: Initialize ADVI with MAP and use MAP as starting point.
* map : Use the MAP as starting point.
* nuts : Run NUTS to estimate starting points and covariance matrix. If
* ADVI_MAP: Initialize ADVI with MAP and use MAP as starting point.
* MAP : Use the MAP as starting point.
* NUTS : Run NUTS to estimate starting points and covariance matrix. If
njobs > 1 it will sample starting points from the estimated posterior,
otherwise it will use the estimated posterior mean.
* None : Do not initialize.
n_init : int
Number of iterations of initializer
If 'advi', number of iterations, if 'nuts', number of draws.
If 'ADVI', number of iterations, if 'nuts', number of draws.
start : dict
Starting point in parameter space (or partial point)
Defaults to trace.point(-1)) if there is a trace provided and
Expand Down Expand Up @@ -147,6 +147,8 @@ def sample(draws, step=None, init='advi', n_init=200000, start=None,
"""
model = modelcontext(model)

if init is not None:
init = init.lower()

if step is None and init is not None and pm.model.all_continuous(model.vars):
# By default, use NUTS sampler
Expand Down Expand Up @@ -398,7 +400,7 @@ def sample_ppc(trace, samples=None, model=None, vars=None, size=None, random_see
return {k: np.asarray(v) for k, v in ppc.items()}


def init_nuts(init='advi', njobs=1, n_init=500000, model=None,
def init_nuts(init='ADVI', njobs=1, n_init=500000, model=None,
random_seed=-1, **kwargs):
"""Initialize and sample from posterior of a continuous model.
Expand All @@ -409,17 +411,17 @@ def init_nuts(init='advi', njobs=1, n_init=500000, model=None,
Parameters
----------
init : str {'advi', 'advi_map', 'map', 'nuts'}
init : str {'ADVI', 'ADVI_MAP', 'MAP', 'NUTS'}
Initialization method to use.
* advi : Run ADVI to estimate posterior mean and diagonal covariance matrix.
* advi_map: Initialize ADVI with MAP and use MAP as starting point.
* map : Use the MAP as starting point.
* nuts : Run NUTS and estimate posterior mean and covariance matrix.
* ADVI : Run ADVI to estimate posterior mean and diagonal covariance matrix.
* ADVI_MAP: Initialize ADVI with MAP and use MAP as starting point.
* MAP : Use the MAP as starting point.
* NUTS : Run NUTS and estimate posterior mean and covariance matrix.
njobs : int
Number of parallel jobs to start.
n_init : int
Number of iterations of initializer
If 'advi', number of iterations, if 'metropolis', number of draws.
If 'ADVI', number of iterations, if 'metropolis', number of draws.
model : Model (optional if in `with` context)
**kwargs : keyword arguments
Extra keyword arguments are forwarded to pymc3.NUTS.
Expand All @@ -439,6 +441,9 @@ def init_nuts(init='advi', njobs=1, n_init=500000, model=None,
pm._log.info('Initializing NUTS using {}...'.format(init))

random_seed = int(np.atleast_1d(random_seed)[0])

if init is not None:
init = init.lower()

if init == 'advi':
v_params = pm.variational.advi(n=n_init, random_seed=random_seed)
Expand Down
14 changes: 7 additions & 7 deletions pymc3/tests/test_diagnostics.py
Expand Up @@ -20,9 +20,9 @@ def get_ptrace(self, n_samples):
# Run sampler
step1 = Slice([model.early_mean_log_, model.late_mean_log_])
step2 = Metropolis([model.switchpoint])
start = {'early_mean': 2., 'late_mean': 3., 'switchpoint': 90}
ptrace = sample(n_samples, [step1, step2], start, njobs=2, progressbar=False,
random_seed=[1, 4])
start = {'early_mean': 7., 'late_mean': 1., 'switchpoint': 100}
ptrace = sample(n_samples, step=[step1, step2], start=start, njobs=2,
progressbar=False, random_seed=[20090425, 19700903])
return ptrace

def test_good(self):
Expand Down Expand Up @@ -53,7 +53,7 @@ def test_right_shape_python_float(self, shape=None, test_shape=None):
# start sampling at the MAP
start = find_MAP()
step = NUTS(scaling=start)
ptrace = sample(n_samples, step, start,
ptrace = sample(n_samples, step=step, start=start,
njobs=n_jobs, random_seed=42)

rhat = gelman_rubin(ptrace)['x']
Expand Down Expand Up @@ -92,7 +92,7 @@ def get_switchpoint(self, n_samples):
# Run sampler
step1 = Slice([model.early_mean_log_, model.late_mean_log_])
step2 = Metropolis([model.switchpoint])
trace = sample(n_samples, [step1, step2], progressbar=False, random_seed=1)
trace = sample(n_samples, step=[step1, step2], progressbar=False, random_seed=1)
return trace['switchpoint']

def test_geweke_negative(self):
Expand Down Expand Up @@ -153,7 +153,7 @@ def test_effective_n(self):
# start sampling at the MAP
start = find_MAP()
step = NUTS(scaling=start)
ptrace = sample(n_samples, step, start,
ptrace = sample(n_samples, step=step, start=start,
njobs=n_jobs, random_seed=42)

n_effective = effective_n(ptrace)['x']
Expand All @@ -174,7 +174,7 @@ def test_effective_n_right_shape_python_float(self,
# start sampling at the MAP
start = find_MAP()
step = NUTS(scaling=start)
ptrace = sample(n_samples, step, start,
ptrace = sample(n_samples, step=step, start=start,
njobs=n_jobs, random_seed=42)

n_effective = effective_n(ptrace)['x']
Expand Down
8 changes: 4 additions & 4 deletions pymc3/tests/test_examples.py
Expand Up @@ -54,7 +54,7 @@ def test_run(self):
h = H(start)

step = pm.HamiltonianMC(model.vars, h)
pm.sample(50, step, start)
pm.sample(50, step=step, start=start)


class TestARM12_6(SeededTest):
Expand Down Expand Up @@ -88,7 +88,7 @@ def too_slow(self):
start = pm.find_MAP(start=start,
vars=[model['groupmean'], model['sd_interval_'], model['floor_m']])
step = pm.NUTS(model.vars, scaling=start)
pm.sample(50, step, start)
pm.sample(50, step=step, start=start)


class TestARM12_6Uranium(SeededTest):
Expand Down Expand Up @@ -129,7 +129,7 @@ def too_slow(self):
h = np.diag(H(start))

step = pm.HamiltonianMC(model.vars, h)
pm.sample(50, step, start)
pm.sample(50, step=step, start=start)


def build_disaster_model(masked=False):
Expand Down Expand Up @@ -263,7 +263,7 @@ def test_run(self):
start = {'psi': 0.5, 'z': (self.y > 0).astype(int), 'theta': 5}
step_one = pm.Metropolis([model.theta_interval_, model.psi_logodds_])
step_two = pm.BinaryMetropolis([model.z])
pm.sample(50, [step_one, step_two], start)
pm.sample(50, step=[step_one, step_two], start=start)


class TestRSV(SeededTest):
Expand Down
2 changes: 1 addition & 1 deletion pymc3/tests/test_glm.py
Expand Up @@ -34,7 +34,7 @@ def test_linear_component(self):
Normal('y_obs', mu=y_est, sd=sigma, observed=self.y_linear)
start = find_MAP(vars=[sigma])
step = Slice(model.vars)
trace = sample(500, step, start, progressbar=False, random_seed=self.random_seed)
trace = sample(500, step=step, start=start, progressbar=False, random_seed=self.random_seed)

self.assertAlmostEqual(np.mean(trace['Intercept']), self.intercept, 1)
self.assertAlmostEqual(np.mean(trace['x']), self.slope, 1)
Expand Down
8 changes: 4 additions & 4 deletions pymc3/tests/test_models_linear.py
Expand Up @@ -44,7 +44,7 @@ def test_linear_component(self):
Normal('y_obs', mu=lm.y_est, sd=sigma, observed=self.y_linear) # yields y_obs
start = find_MAP(vars=[sigma])
step = Slice(model.vars)
trace = sample(500, step, start, progressbar=False, random_seed=self.random_seed)
trace = sample(500, step=step, start=start, progressbar=False, random_seed=self.random_seed)

self.assertAlmostEqual(np.mean(trace['lm_Intercept']), self.intercept, 1)
self.assertAlmostEqual(np.mean(trace['lm_x0']), self.slope, 1)
Expand All @@ -58,7 +58,7 @@ def test_linear_component_from_formula(self):
Normal('y_obs', mu=lm.y_est, sd=sigma, observed=self.y_linear)
start = find_MAP(vars=[sigma])
step = Slice(model.vars)
trace = sample(500, step, start, progressbar=False, random_seed=self.random_seed)
trace = sample(500, step=step, start=start, progressbar=False, random_seed=self.random_seed)

self.assertAlmostEqual(np.mean(trace['Intercept']), self.intercept, 1)
self.assertAlmostEqual(np.mean(trace['x']), self.slope, 1)
Expand All @@ -79,7 +79,7 @@ def test_glm(self):
)
start = find_MAP()
step = Slice(model.vars)
trace = sample(500, step, start, progressbar=False, random_seed=self.random_seed)
trace = sample(500, step=step, start=start, progressbar=False, random_seed=self.random_seed)
self.assertAlmostEqual(np.mean(trace['glm_Intercept']), self.intercept, 1)
self.assertAlmostEqual(np.mean(trace['glm_x0']), self.slope, 1)
self.assertAlmostEqual(np.mean(trace['glm_sd']), self.sd, 1)
Expand All @@ -91,7 +91,7 @@ def test_glm_from_formula(self):
Glm.from_formula('y ~ x', self.data_linear, name=NAME)
start = find_MAP()
step = Slice(model.vars)
trace = sample(500, step, start, progressbar=False, random_seed=self.random_seed)
trace = sample(500, step=step, start=start, progressbar=False, random_seed=self.random_seed)

self.assertAlmostEqual(np.mean(trace['%s_Intercept' % NAME]), self.intercept, 1)
self.assertAlmostEqual(np.mean(trace['%s_x' % NAME]), self.slope, 1)
Expand Down
6 changes: 3 additions & 3 deletions pymc3/tests/test_plots.py
Expand Up @@ -20,7 +20,7 @@ def test_plots():
start = model.test_point
h = find_hessian(start)
step = Metropolis(model.vars, h)
trace = sample(3000, step, start)
trace = sample(3000, step=step, start=start)

traceplot(trace)
forestplot(trace)
Expand All @@ -34,7 +34,7 @@ def test_plots_multidimensional():
with model as model:
h = np.diag(find_hessian(start))
step = Metropolis(model.vars, h)
trace = sample(3000, step, start)
trace = sample(3000, step=step, start=start)

traceplot(trace)
plot_posterior(trace)
Expand All @@ -47,7 +47,7 @@ def test_multichain_plots():
step1 = Slice([model.early_mean_log_, model.late_mean_log_])
step2 = Metropolis([model.switchpoint])
start = {'early_mean': 2., 'late_mean': 3., 'switchpoint': 50}
ptrace = sample(1000, [step1, step2], start, njobs=2)
ptrace = sample(1000, step=[step1, step2], start=start, njobs=2)

forestplot(ptrace, varnames=['early_mean', 'late_mean'])
autocorrplot(ptrace, varnames=['switchpoint'])
Expand Down
2 changes: 1 addition & 1 deletion pymc3/tests/test_sampling.py
Expand Up @@ -59,7 +59,7 @@ def test_sample(self):
with self.model:
for njobs in test_njobs:
for steps in [1, 10, 300]:
pm.sample(steps, self.step, {}, None, njobs=njobs, random_seed=self.random_seed)
pm.sample(steps, step=self.step, njobs=njobs, random_seed=self.random_seed)

def test_sample_init(self):
with self.model:
Expand Down

0 comments on commit f0786f7

Please sign in to comment.