Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix optimize defaults for older scipy versions for L-BFGS-B #476

Merged
merged 4 commits into from Nov 23, 2014
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
70 changes: 54 additions & 16 deletions dipy/core/optimize.py
Expand Up @@ -4,20 +4,15 @@
Scipy < 0.12. All optimizers are available for scipy >= 0.12.
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This will soon be inaccurate, as we add stuff from my SFM and LiFE PRs. I might just change this in my PR. I have a few more changes proposed on top of this. You can leave this as-is, but I wanted to give you a heads up on that.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Okay, cool.

"""

import os
from tempfile import mkstemp
from distutils.version import LooseVersion
import numpy as np
import scipy

SCIPY_LESS_0_12 = LooseVersion(scipy.__version__) < '0.12'

if not SCIPY_LESS_0_12:

from scipy.optimize import minimize

else:

from scipy.optimize import fmin_l_bfgs_b, fmin_powell


Expand Down Expand Up @@ -104,7 +99,7 @@ def __init__(self, fun, x0, args=(), method='L-BFGS-B', jac=None,

callback : callable, optional
Called after each iteration, as ``callback(xk)``, where ``xk`` is
the current parameter vector.
the current parameter vector. Only available using Scipy >= 0.12.

options : dict, optional
A dictionary of solver options. All methods accept the following
Expand All @@ -117,33 +112,65 @@ def __init__(self, fun, x0, args=(), method='L-BFGS-B', jac=None,
`show_options('minimize', method)`.

evolution : bool, optional
save history of x for each iteration
save history of x for each iteration. Only available using Scipy
>= 0.12.

See also
---------
scipy.optimize.minimize
"""

self.size_of_x = len(x0)
self.tmp_files = []
self._evol_kx = None

_eps = np.finfo(float).eps
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why is eps changed to the lowest possible value? You might get convergence issues at line 155, for example, and anywhere else it's used.


if SCIPY_LESS_0_12:

if evolution is True:
print('Saving history is available only with Scipy >= 0.12.')

if method == 'L-BFGS-B':
default_options = {'maxcor': 10, 'ftol': 1e-7, 'gtol': 1e-5,
'eps': 1e-8, 'maxiter': 1000}

if jac is None:
approx_grad = True
else:
approx_grad = False

out = fmin_l_bfgs_b(fun, x0, args,
approx_grad=approx_grad,
bounds=bounds,
m=options['maxcor'],
factr=options['ftol'] / np.finfo(float).eps,
pgtol=options['gtol'],
epsilon=options['eps'])
if options is None:
options = default_options

if options is not None:
for key in options:
default_options[key] = options[key]
options = default_options

try:
out = fmin_l_bfgs_b(fun, x0, args,
approx_grad=approx_grad,
bounds=bounds,
m=options['maxcor'],
factr=options['ftol']/_eps,
pgtol=options['gtol'],
epsilon=options['eps'],
maxiter=options['maxiter'])
except TypeError:

msg = 'In Scipy ' + scipy.__version__ + ' `maxiter` '
msg += 'parameter is not available for L-BFGS-B. \n Using '
msg += '`maxfun` instead with value twice of maxiter.'

print(msg)
out = fmin_l_bfgs_b(fun, x0, args,
approx_grad=approx_grad,
bounds=bounds,
m=options['maxcor'],
factr=options['ftol']/_eps,
pgtol=options['gtol'],
epsilon=options['eps'],
maxfun=options['maxiter'] * 2)

res = {'x': out[0], 'fun': out[1], 'nfev': out[2]['funcalls']}
try:
Expand All @@ -153,6 +180,17 @@ def __init__(self, fun, x0, args=(), method='L-BFGS-B', jac=None,

elif method == 'Powell':

default_options = {'xtol': 0.0001, 'ftol': 0.0001,
'maxiter': None}

if options is None:
options = default_options

if options is not None:
for key in options:
default_options[key] = options[key]
options = default_options

out = fmin_powell(fun, x0, args,
xtol=options['xtol'],
ftol=options['ftol'],
Expand All @@ -168,7 +206,7 @@ def __init__(self, fun, x0, args=(), method='L-BFGS-B', jac=None,
else:

msg = 'Only L-BFGS-B and Powell is supported in this class '
msg += 'for versions of Scipy < 0.11.'
msg += 'for versions of Scipy < 0.12.'
raise ValueError(msg)

if not SCIPY_LESS_0_12:
Expand Down
65 changes: 60 additions & 5 deletions dipy/core/tests/test_optimize.py
@@ -1,4 +1,3 @@
import os
import numpy as np
from numpy.testing import (assert_equal,
assert_almost_equal,
Expand All @@ -21,19 +20,20 @@ def func2(x):

if not SCIPY_LESS_0_12:

print('Scipy >= 0.12')

opt = Optimizer(fun=func, x0=np.array([1., 1., 1.]), method='Powell')

assert_array_almost_equal(opt.xopt, np.array([0, 0, 0]))

assert_almost_equal(opt.fopt, 0)

opt = Optimizer(fun=func, x0=np.array([1., 1., 1.]), method='L-BFGS-B',
options={'maxcor': 10, 'ftol': 1e-7,
'gtol': 1e-5, 'eps': 1e-8})

assert_array_almost_equal(opt.xopt, np.array([0, 0, 0]))

assert_almost_equal(opt.fopt, 0)
assert_equal(opt.evolution, None)

assert_equal(opt.evolution, None)

Expand All @@ -43,7 +43,6 @@ def func2(x):
evolution=False)

assert_array_almost_equal(opt.xopt, np.array([0, 0, 0]))

assert_almost_equal(opt.fopt, 0)

opt.print_summary()
Expand Down Expand Up @@ -78,13 +77,14 @@ def func2(x):

if SCIPY_LESS_0_12:

print('Scipy < 0.12')

opt = Optimizer(fun=func, x0=np.array([1., 1., 1.]),
method='L-BFGS-B',
options={'maxcor': 10, 'ftol': 1e-7,
'gtol': 1e-5, 'eps': 1e-8})

assert_array_almost_equal(opt.xopt, np.array([0, 0, 0]))

assert_almost_equal(opt.fopt, 0)

print(opt.nit)
Expand All @@ -102,6 +102,61 @@ def func2(x):

assert_array_almost_equal(opt.xopt, np.array([0, 0, 0, 0.]))

opt = Optimizer(fun=func, x0=np.array([1., 1., 1.]),
method='L-BFGS-B',
options={'maxcor': 10, 'eps': 1e-8})

assert_array_almost_equal(opt.xopt, np.array([0, 0, 0]))
assert_almost_equal(opt.fopt, 0)

print(opt.nit)
print(opt.fopt)
print(opt.nfev)

opt = Optimizer(fun=func, x0=np.array([1., 1., 1.]),
method='L-BFGS-B',
options=None)

assert_array_almost_equal(opt.xopt, np.array([0, 0, 0]))
assert_almost_equal(opt.fopt, 0)

print(opt.nit)
print(opt.fopt)
print(opt.nfev)

opt = Optimizer(fun=func2, x0=np.array([1., 1., 1., 5.]),
method='L-BFGS-B',
options={'gtol': 1e-7, 'ftol': 1e-7, 'maxiter': 10000})

assert_array_almost_equal(opt.xopt, np.array([0, 0, 0, 0.]), 4)
assert_almost_equal(opt.fopt, 0)

print(opt.nit)
print(opt.fopt)
print(opt.nfev)

opt = Optimizer(fun=func2, x0=np.array([1., 1., 1., 5.]),
method='Powell',
options={'maxiter': 1e6},
evolution=True)

print(opt.nit)
print(opt.fopt)
print(opt.nfev)

assert_array_almost_equal(opt.xopt, np.array([0, 0, 0, 0.]))

opt = Optimizer(fun=func2, x0=np.array([1., 1., 1., 5.]),
method='Powell',
options={'maxiter': 1e6},
evolution=True)

print(opt.nit)
print(opt.fopt)
print(opt.nfev)

assert_array_almost_equal(opt.xopt, np.array([0, 0, 0, 0.]))


if __name__ == '__main__':

Expand Down