New unit tests for optimization algorithms: fmin, fmin_cg, fmin_bfgs, fmin_powell, fmin_ncg, fmin_l_bfgs_b.

The fmin_l_bfgs_b function is currently broken and fails.  (The problem may be in the handling of character arrays in f2py or lbfgsb.pyf; see http://www.scipy.net/pipermail/scipy-dev/2005-October/003685.html.)
edschofield committed Nov 8, 2005
commit 75c340a (parent: 56b0c17)
Showing 1 changed file with 124 additions and 0 deletions.
Lib/optimize/tests/test_optimize.py (124 additions, 0 deletions)
@@ -0,0 +1,124 @@
""" Unit tests for optimization routines
Author: Ed Schofield
Nov 2005
"""

from scipy.test.testing import *

# set_package_path()/restore_path() bracket the imports so that the
# in-development scipy package is picked up for testing.
set_package_path()
from scipy import *
from scipy.linalg import norm
# from scipy import optimize, array, zeros, float64
restore_path()


class test_optimize(ScipyTestCase):
    """ Test case for a simple constrained entropy maximization problem
    (the machine translation example of Berger et al in
    Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
    """
    def setUp(self):
        self.F = array([[1,1,1],[1,1,0],[1,0,1],[1,0,0],[1,0,0]])
        self.K = array([1., 0.3, 0.5])
        self.startparams = zeros(3, float64)
        self.solution = array([0., -0.524869316, 0.487525860])
        self.maxiter = 1000


    def func(self, x):
        log_pdot = dot(self.F, x)
        logZ = log(sum(exp(log_pdot)))
        f = logZ - dot(self.K, x)
        return f


    def grad(self, x):
        log_pdot = dot(self.F, x)
        logZ = log(sum(exp(log_pdot)))
        p = exp(log_pdot - logZ)
        return dot(self.F.transpose(), p) - self.K


    def check_cg(self):
        """ conjugate gradient optimization routine
        """
        retval = optimize.fmin_cg(self.func, self.startparams, self.grad, (), \
                                  maxiter=self.maxiter, \
                                  full_output=True, disp=False, retall=False)

        (params, fopt, func_calls, grad_calls, warnflag) = retval

        err = abs(self.func(params) - self.func(self.solution))
        #print "CG: Difference is: " + str(err)
        assert err < 1e-6


    def check_bfgs(self):
        """ Broyden-Fletcher-Goldfarb-Shanno optimization routine
        """
        retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad, \
                                    args=(), maxiter=self.maxiter, \
                                    full_output=True, disp=False, retall=False)

        (params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag) = retval

        err = abs(self.func(params) - self.func(self.solution))
        #print "BFGS: Difference is: " + str(err)
        assert err < 1e-6


    def check_powell(self):
        """ Powell (direction set) optimization routine
        """
        retval = optimize.fmin_powell(self.func, self.startparams, \
                                      args=(), maxiter=self.maxiter, \
                                      full_output=True, disp=False, retall=False)

        (params, fopt, direc, numiter, func_calls, warnflag) = retval

        err = abs(self.func(params) - self.func(self.solution))
        #print "Powell: Difference is: " + str(err)
        assert err < 1e-6

    def check_neldermead(self):
        """ Nelder-Mead simplex algorithm
        """
        retval = optimize.fmin(self.func, self.startparams, \
                               args=(), maxiter=self.maxiter, \
                               full_output=True, disp=False, retall=False)

        (params, fopt, numiter, func_calls, warnflag) = retval

        err = abs(self.func(params) - self.func(self.solution))
        #print "Nelder-Mead: Difference is: " + str(err)
        assert err < 1e-6

    def check_ncg(self):
        """ line-search Newton conjugate gradient optimization routine
        """
        retval = optimize.fmin_ncg(self.func, self.startparams, self.grad, \
                                   args=(), maxiter=self.maxiter, \
                                   full_output=False, disp=False, retall=False)

        # With full_output=False, fmin_ncg returns only the solution vector.
        params = retval

        err = abs(self.func(params) - self.func(self.solution))
        #print "NCG: Difference is: " + str(err)
        assert err < 1e-6


    def check_l_bfgs_b(self):
        """ limited-memory bound-constrained BFGS algorithm
        """
        retval = optimize.fmin_l_bfgs_b(self.func, self.startparams, self.grad, \
                                        args=(), maxfun=self.maxiter)

        # fmin_l_bfgs_b returns the minimizer, the function value there,
        # and a dict of convergence information.
        (params, fopt, d) = retval

        err = abs(self.func(params) - self.func(self.solution))
        #print "LBFGSB: Difference is: " + str(err)
        assert err < 1e-6


if __name__ == "__main__":
    ScipyTest().run()
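Run as a script, the module executes the whole suite via ScipyTest().run(). As an independent sanity check on the analytic gradient these tests rely on, one can compare grad against central finite differences. Below is a minimal standalone sketch (illustrative, not part of the commit) that reuses the problem data from setUp; it assumes only that the scipy namespace of the era exports the numeric names the test itself pulls in via "from scipy import *":

from scipy import array, zeros, float64, dot, log, exp, sum

F = array([[1,1,1],[1,1,0],[1,0,1],[1,0,0],[1,0,0]])
K = array([1., 0.3, 0.5])

def func(x):
    # Dual maxent objective: log Z(x) - K.x
    return log(sum(exp(dot(F, x)))) - dot(K, x)

def grad(x):
    # Analytic gradient: F^T p(x) - K
    log_pdot = dot(F, x)
    p = exp(log_pdot - log(sum(exp(log_pdot))))
    return dot(F.transpose(), p) - K

# Central differences should agree with grad() to roughly 1e-9 here,
# so the 1e-6 tolerance leaves a wide margin.
x0 = zeros(3, float64)
h = 1e-6
for i in range(3):
    e = zeros(3, float64)
    e[i] = h
    fd = (func(x0 + e) - func(x0 - e)) / (2 * h)
    assert abs(fd - grad(x0)[i]) < 1e-6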
