2 changes: 1 addition & 1 deletion pymc3/model.py

@@ -329,7 +329,7 @@ class ValueGradFunction(object):
         The value that we compute with its gradient.
     grad_vars : list of named theano variables or None
         The arguments with respect to which the gradient is computed.
-    extra_args : list of named theano variables or None
+    extra_vars : list of named theano variables or None
         Other arguments of the function that are assumed constant. They
         are stored in shared variables and can be set using
        `set_extra_values`.
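
The hunk above only renames the documented argument from `extra_args` to `extra_vars`, matching the actual parameter of `ValueGradFunction`. A minimal, hypothetical sketch of how those pieces fit together, assuming `Model.logp_dlogp_function` is the usual entry point to this class in this pymc3 version:

    import pymc3 as pm

    with pm.Model() as model:
        pm.Normal('x', mu=0, sd=1)

    # Internally this wraps ValueGradFunction(cost, grad_vars, extra_vars):
    # the gradient is taken with respect to grad_vars, while extra_vars are
    # held in shared variables and updated through set_extra_values().
    func = model.logp_dlogp_function(grad_vars=model.free_RVs)
    func.set_extra_values(model.test_point)  # effectively a no-op here, extra_vars is empty
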
4 changes: 2 additions & 2 deletions pymc3/sampling.py

@@ -842,7 +842,7 @@ def init_nuts(init='auto', njobs=1, n_init=500000, model=None,
         if njobs == 1:
             start = start[0]
     elif init == 'advi_map':
-        start = pm.find_MAP()
+        start = pm.find_MAP(include_transformed=True)
         approx = pm.MeanField(model=model, start=start)
         pm.fit(
             random_seed=random_seed,
@@ -859,7 +859,7 @@ def init_nuts(init='auto', njobs=1, n_init=500000, model=None,
         if njobs == 1:
             start = start[0]
     elif init == 'map':
-        start = pm.find_MAP()
+        start = pm.find_MAP(include_transformed=True)
         cov = pm.find_hessian(point=start)
         start = [start] * njobs
         potential = quadpotential.QuadPotentialFull(cov)
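
Both hunks now ask find_MAP for the transformed free variables as well, since MeanField and find_hessian operate on the transformed parameterization. A minimal sketch of what the flag changes, assuming a single bounded parameter (this toy model is illustrative, not part of the PR):

    import pymc3 as pm

    with pm.Model():
        pm.Uniform('p', 0, 1)  # internally sampled on an unbounded interval transform

        # In this version of find_MAP, only include_transformed=True adds the
        # transformed free variable (named something like 'p_interval__',
        # depending on the pymc3 version) that MeanField / find_hessian expect
        # as a starting point.
        print(pm.find_MAP())
        print(pm.find_MAP(include_transformed=True))
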
3 changes: 1 addition & 2 deletions pymc3/tests/test_examples.py

@@ -2,7 +2,6 @@
 import numpy as np
 import pandas as pd
 import pymc3 as pm
-import scipy.optimize as opt
 import theano.tensor as tt
 import pytest
 import theano
@@ -195,7 +194,7 @@ def build_model(self):

     def test_run(self):
         with self.build_model():
-            start = pm.find_MAP(fmin=opt.fmin_powell)
+            start = pm.find_MAP(method="Powell")
             pm.sample(50, pm.Slice(), start=start)

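
With find_MAP delegating to scipy.optimize.minimize, the test selects the optimizer by name instead of passing an fmin_* function, so the direct scipy.optimize import can go. A small sketch of the two styles, using an arbitrary toy model:

    import pymc3 as pm

    # Old style (removed above):
    #     import scipy.optimize as opt
    #     start = pm.find_MAP(fmin=opt.fmin_powell)
    # New style: name the scipy.optimize.minimize method directly.
    with pm.Model():
        pm.Normal('x', mu=0, sd=1)
        start = pm.find_MAP(method="Powell")
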
25 changes: 6 additions & 19 deletions pymc3/tests/test_starting.py

@@ -2,7 +2,7 @@
 import numpy as np
 from pymc3.tuning import starting
 from pymc3 import Model, Uniform, Normal, Beta, Binomial, find_MAP, Point
-from .models import simple_model, non_normal, exponential_beta, simple_arbitrary_det
+from .models import simple_model, non_normal, simple_arbitrary_det
 from .helpers import select_by_precision

@@ -20,19 +20,6 @@ def test_accuracy_non_normal():
     close_to(newstart['x'], mu, select_by_precision(float64=1e-5, float32=1E-4))


-def test_errors():
-    _, model, _ = exponential_beta(2)
-    with model:
-        try:
-            newstart = find_MAP(Point(x=[-.5, .01], y=[.5, 4.4]))
-        except ValueError as e:
-            msg = str(e)
-            assert "x.logp" in msg, msg
-            assert "x.value" not in msg, msg
-        else:
-            assert False, newstart
-
-
 def test_find_MAP_discrete():
     tol = 2.0**-11
     alpha = 4
@@ -41,8 +28,8 @@ def test_find_MAP_discrete():
     yes = 15

     with Model() as model:
-        p = Beta('p', alpha, beta, transform=None)
-        Binomial('ss', n=n, p=p, transform=None)
+        p = Beta('p', alpha, beta)
+        Binomial('ss', n=n, p=p)
         Binomial('s', n=n, p=p, observed=yes)

         map_est1 = starting.find_MAP()
@@ -68,14 +55,14 @@ def test_find_MAP():
     data = (data - np.mean(data)) / np.std(data)

     with Model():
-        mu = Uniform('mu', -1, 1, transform=None)
-        sigma = Uniform('sigma', .5, 1.5, transform=None)
+        mu = Uniform('mu', -1, 1)
+        sigma = Uniform('sigma', .5, 1.5)
         Normal('y', mu=mu, tau=sigma**-2, observed=data)

         # Test gradient minimization
         map_est1 = starting.find_MAP()
         # Test non-gradient minimization
-        map_est2 = starting.find_MAP(fmin=starting.optimize.fmin_powell)
+        map_est2 = starting.find_MAP(method="Powell")

     close_to(map_est1['mu'], 0, tol)
     close_to(map_est1['sigma'], 1, tol)
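
Since find_MAP now handles the model's default (transformed) parameterization, the tests no longer declare bounded priors with transform=None, and the Powell optimizer is again selected by scipy method name. The gist of the updated test, as a standalone sketch:

    import numpy as np
    from pymc3 import Model, Uniform, Normal, find_MAP

    data = np.random.randn(100)
    data = (data - np.mean(data)) / np.std(data)

    with Model():
        # Bounded priors keep their default interval transform now.
        mu = Uniform('mu', -1, 1)
        sigma = Uniform('sigma', .5, 1.5)
        Normal('y', mu=mu, tau=sigma**-2, observed=data)

        map_grad = find_MAP()                   # default gradient-based optimizer
        map_powell = find_MAP(method="Powell")  # gradient-free alternative
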
4 changes: 2 additions & 2 deletions pymc3/tests/test_tuning.py

@@ -22,13 +22,13 @@ def test_mle_jacobian():

     start, model, _ = models.simple_normal(bounded_prior=False)
     with model:
-        map_estimate = find_MAP(model=model)
+        map_estimate = find_MAP(method="BFGS", model=model)

     rtol = 1E-5  # this rtol should work on both floatX precisions
     np.testing.assert_allclose(map_estimate["mu_i"], truth, rtol=rtol)

     start, model, _ = models.simple_normal(bounded_prior=True)
     with model:
-        map_estimate = find_MAP(model=model)
+        map_estimate = find_MAP(method="BFGS", model=model)

     np.testing.assert_allclose(map_estimate["mu_i"], truth, rtol=rtol)
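
Here the optimizer is pinned to BFGS by name rather than relying on find_MAP's default. A short sketch of choosing the optimizer, assuming `method` is forwarded to scipy.optimize.minimize and the default is a gradient-based method such as L-BFGS-B:

    import numpy as np
    import pymc3 as pm

    with pm.Model():
        mu = pm.Normal('mu', mu=0, sd=10)
        pm.Normal('y', mu=mu, sd=1, observed=np.random.randn(200))

        # Any scipy.optimize.minimize method name should be accepted here.
        map_bfgs = pm.find_MAP(method="BFGS")
        map_default = pm.find_MAP()  # presumably L-BFGS-B, the assumed default
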