diff --git a/quantecon/optimize/__init__.py b/quantecon/optimize/__init__.py
index 0704bb4f8..ea049aa03 100644
--- a/quantecon/optimize/__init__.py
+++ b/quantecon/optimize/__init__.py
@@ -3,4 +3,5 @@
 """
 from .scalar_maximization import brent_max
+from .nelder_mead import nelder_mead
 from .root_finding import newton, newton_halley, newton_secant, bisect, brentq
diff --git a/quantecon/optimize/nelder_mead.py b/quantecon/optimize/nelder_mead.py
new file mode 100644
index 000000000..d0f7592a5
--- /dev/null
+++ b/quantecon/optimize/nelder_mead.py
@@ -0,0 +1,437 @@
"""
Implements the Nelder-Mead algorithm for maximizing a scalar-valued function
of one or more variables.

"""

import numpy as np
from numba import njit
from collections import namedtuple

results = namedtuple('results', 'x fun success nit final_simplex')


@njit
def nelder_mead(fun, x0, bounds=np.array([[], []]).T, args=(), tol_f=1e-10,
                tol_x=1e-10, max_iter=1000):
    """
    .. highlight:: none

    Maximize a scalar-valued function of one or more variables using the
    Nelder-Mead method.

    This function is JIT-compiled in `nopython` mode using Numba.

    Parameters
    ----------
    fun : callable
        The objective function to be maximized:
        `fun(x, *args) -> float`
        where x is a 1-D array with shape (n,) and args is a tuple of the
        fixed parameters needed to completely specify the function. This
        function must be JIT-compiled in `nopython` mode using Numba.

    x0 : ndarray(float, ndim=1)
        Initial guess. Array of real elements of size (n,), where 'n' is the
        number of independent variables.

    bounds : ndarray(float, ndim=2), optional
        Bounds for each variable for the proposed solution, encoded as a
        sequence of (min, max) pairs for each element in x. The default
        option is used to specify no bounds on x.

    args : tuple, optional
        Extra arguments passed to the objective function.

    tol_f : scalar(float), optional(default=1e-10)
        Tolerance to be used for the function value convergence test.

    tol_x : scalar(float), optional(default=1e-10)
        Tolerance to be used for the function domain convergence test.

    max_iter : scalar(int), optional(default=1000)
        The maximum number of allowed iterations.

    Returns
    -------
    results : namedtuple
        A namedtuple containing the following items:
        ::

            "x" : Approximate local maximizer
            "fun" : Approximate local maximum value
            "success" : 1 if the algorithm successfully terminated, 0 otherwise
            "nit" : Number of iterations
            "final_simplex" : Vertices of the final simplex

    Examples
    --------
    >>> import numpy as np
    >>> import quantecon as qe
    >>> from numba import njit
    >>> @njit
    ... def rosenbrock(x):
    ...     return -(100 * (x[1] - x[0] ** 2) ** 2 + (1 - x[0]) ** 2)
    ...
    >>> x0 = np.array([-2, 1])
    >>> qe.optimize.nelder_mead(rosenbrock, x0)
    results(x=array([0.99999814, 0.99999756]), fun=-1.6936258239463265e-10,
            success=True, nit=110, final_simplex=array(...))

    Notes
    -----
    This algorithm has a long history of successful use in applications, but
    it will usually be slower than an algorithm that uses first or second
    derivative information. In practice, it can have poor performance in
    high-dimensional problems and is not robust when applied to complicated
    functions. Additionally, there currently is no complete theory describing
    when the algorithm will successfully converge to the maximum, or how fast
    it will if it does.

    References
    ----------

    .. [1] J. C. Lagarias, J. A. Reeds, M. H. Wright and P. E. Wright,
           Convergence Properties of the Nelder–Mead Simplex Method in Low
           Dimensions, SIAM J. Optim. 9, 112–147 (1998).

    .. [2] S. Singer and S. Singer, Efficient implementation of the
           Nelder–Mead search algorithm, Appl. Numer. Anal. Comput. Math.,
           vol. 1, no. 2, pp. 524–534, 2004.

    .. [3] J. A. Nelder and R. Mead, A simplex method for function
           minimization, Comput. J. 7, 308–313 (1965).

    .. [4] Gao, F. and Han, L., Implementing the Nelder-Mead simplex
           algorithm with adaptive parameters, Comput Optim Appl (2012)
           51: 259.

    .. [5] http://www.scholarpedia.org/article/Nelder-Mead_algorithm

    .. [6] http://www.brnt.eu/phd/node10.html#SECTION00622200000000000000

    .. [7] Chase Coleman's tutorial on Nelder Mead

    .. [8] SciPy's Nelder-Mead implementation

    """
    vertices = _initialize_simplex(x0, bounds)

    results = _nelder_mead_algorithm(fun, vertices, bounds, args=args,
                                     tol_f=tol_f, tol_x=tol_x,
                                     max_iter=max_iter)

    return results
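# ----------------------------------------------------------------------
# Usage sketch (illustration only, not part of the patch): passing
# `bounds` and `args` to `nelder_mead`, assuming the package is
# importable as `quantecon`. `neg_quadratic` is a made-up objective;
# any `@njit`-compiled function of the same form works.

import numpy as np
from numba import njit
import quantecon as qe


@njit
def neg_quadratic(x, a):
    # Concave quadratic, maximized at x = a in every coordinate
    return -((x - a) ** 2).sum()


x0 = np.array([1., 1., 1.])
bounds = np.array([[-10., 10.]] * 3)  # one (min, max) pair per variable
res = qe.optimize.nelder_mead(neg_quadratic, x0, bounds=bounds, args=(2.,))
# res.x should be close to [2., 2., 2.] and res.fun close to 0.
# ----------------------------------------------------------------------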
@njit
def _nelder_mead_algorithm(fun, vertices, bounds=np.array([[], []]).T,
                           args=(), ρ=1., χ=2., γ=0.5, σ=0.5, tol_f=1e-8,
                           tol_x=1e-8, max_iter=1000):
    """
    .. highlight:: none

    Implements the Nelder-Mead algorithm described in Lagarias et al. (1998),
    modified to maximize instead of minimize. JIT-compiled in `nopython`
    mode using Numba.

    Parameters
    ----------
    fun : callable
        The objective function to be maximized:
        `fun(x, *args) -> float`
        where x is a 1-D array with shape (n,) and args is a tuple of the
        fixed parameters needed to completely specify the function. This
        function must be JIT-compiled in `nopython` mode using Numba.

    vertices : ndarray(float, ndim=2)
        Initial simplex with shape (n+1, n) to be modified in-place.

    bounds : ndarray(float, ndim=2), optional
        Sequence of (min, max) pairs for each element in x. The default
        option is used to specify no bounds on x.

    args : tuple, optional
        Extra arguments passed to the objective function.

    ρ : scalar(float), optional(default=1.)
        Reflection parameter. Must be strictly greater than 0.

    χ : scalar(float), optional(default=2.)
        Expansion parameter. Must be strictly greater than max(1, ρ).

    γ : scalar(float), optional(default=0.5)
        Contraction parameter. Must be strictly between 0 and 1.

    σ : scalar(float), optional(default=0.5)
        Shrinkage parameter. Must be strictly between 0 and 1.

    tol_f : scalar(float), optional(default=1e-8)
        Tolerance to be used for the function value convergence test.

    tol_x : scalar(float), optional(default=1e-8)
        Tolerance to be used for the function domain convergence test.

    max_iter : scalar(int), optional(default=1000)
        The maximum number of allowed iterations.

    Returns
    -------
    results : namedtuple
        A namedtuple containing the following items:
        ::

            "x" : Approximate solution
            "fun" : Approximate local maximum
            "success" : 1 if successfully terminated, 0 otherwise
            "nit" : Number of iterations
            "final_simplex" : The vertices of the final simplex

    """
    n = vertices.shape[1]
    _check_params(ρ, χ, γ, σ, bounds, n)

    nit = 0

    ργ = ρ * γ
    ρχ = ρ * χ
    σ_n = σ ** n

    f_val = np.empty(n+1, dtype=np.float64)
    for i in range(n+1):
        f_val[i] = _neg_bounded_fun(fun, bounds, vertices[i], args=args)

    # Step 1: Sort
    sort_ind = f_val.argsort()
    LV_ratio = 1

    # Compute centroid
    x_bar = vertices[sort_ind[:n]].sum(axis=0) / n

    while True:
        shrink = False

        # Check termination
        fail = nit >= max_iter

        best_val_idx = sort_ind[0]
        worst_val_idx = sort_ind[n]

        term_f = f_val[worst_val_idx] - f_val[best_val_idx] < tol_f

        # Linearized volume ratio test (see [2])
        term_x = LV_ratio < tol_x

        if term_x or term_f or fail:
            break

        # Step 2: Reflection
        x_r = x_bar + ρ * (x_bar - vertices[worst_val_idx])
        f_r = _neg_bounded_fun(fun, bounds, x_r, args=args)

        if f_r >= f_val[best_val_idx] and f_r < f_val[sort_ind[n-1]]:
            # Accept reflection
            vertices[worst_val_idx] = x_r
            LV_ratio *= ρ

        # Step 3: Expansion
        elif f_r < f_val[best_val_idx]:
            x_e = x_bar + χ * (x_r - x_bar)
            f_e = _neg_bounded_fun(fun, bounds, x_e, args=args)
            if f_e < f_r:  # Greedy minimization
                vertices[worst_val_idx] = x_e
                LV_ratio *= ρχ
            else:
                vertices[worst_val_idx] = x_r
                LV_ratio *= ρ

        # Step 4 & 5: Contraction and Shrink
        else:
            # Step 4: Contraction
            if f_r < f_val[worst_val_idx]:  # Step 4.a: Outside Contraction
                x_c = x_bar + γ * (x_r - x_bar)
                LV_ratio_update = ργ
            else:  # Step 4.b: Inside Contraction
                x_c = x_bar - γ * (x_r - x_bar)
                LV_ratio_update = γ

            f_c = _neg_bounded_fun(fun, bounds, x_c, args=args)
            if f_c < min(f_r, f_val[worst_val_idx]):  # Accept contraction
                vertices[worst_val_idx] = x_c
                LV_ratio *= LV_ratio_update

            # Step 5: Shrink
            else:
                shrink = True
                for i in sort_ind[1:]:
                    vertices[i] = vertices[best_val_idx] + σ * \
                        (vertices[i] - vertices[best_val_idx])
                    f_val[i] = _neg_bounded_fun(fun, bounds, vertices[i],
                                                args=args)

                sort_ind[1:] = f_val[sort_ind[1:]].argsort() + 1

                x_bar = vertices[best_val_idx] + σ * \
                    (x_bar - vertices[best_val_idx]) + \
                    (vertices[worst_val_idx] - vertices[sort_ind[n]]) / n

                LV_ratio *= σ_n

        if not shrink:  # Nonshrink ordering rule
            f_val[worst_val_idx] = _neg_bounded_fun(fun, bounds,
                                                    vertices[worst_val_idx],
                                                    args=args)

            for i, j in enumerate(sort_ind):
                if f_val[worst_val_idx] < f_val[j]:
                    sort_ind[i+1:] = sort_ind[i:-1]
                    sort_ind[i] = worst_val_idx
                    break

            x_bar += (vertices[worst_val_idx] - vertices[sort_ind[n]]) / n

        nit += 1

    return results(vertices[sort_ind[0]], -f_val[sort_ind[0]], not fail, nit,
                   vertices)
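# ----------------------------------------------------------------------
# Geometry sketch (illustration only, not part of the patch): the
# reflection and expansion points of Steps 2 and 3 above for a concrete
# 2-D simplex, using the default parameters ρ = 1 and χ = 2.

import numpy as np

vertices = np.array([[0., 0.],   # best vertex
                     [1., 0.],
                     [0., 1.]])  # treat this one as the worst vertex
worst = vertices[2]

# Centroid of every vertex except the worst
x_bar = vertices[:2].sum(axis=0) / 2           # -> [0.5, 0. ]

# Step 2: reflect the worst vertex through the centroid
x_r = x_bar + 1. * (x_bar - worst)             # -> [1., -1.]

# Step 3: expand further along the reflection direction
x_e = x_bar + 2. * (x_r - x_bar)               # -> [1.5, -2.]
# ----------------------------------------------------------------------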
@njit
def _initialize_simplex(x0, bounds):
    """
    Generates an initial simplex for the Nelder-Mead method. JIT-compiled in
    `nopython` mode using Numba.

    Parameters
    ----------
    x0 : ndarray(float, ndim=1)
        Initial guess. Array of real elements of size (n,), where 'n' is the
        number of independent variables.

    bounds : ndarray(float, ndim=2)
        Sequence of (min, max) pairs for each element in x0. The simplex is
        not clipped to these bounds; out-of-bounds vertices are instead
        penalized by `_neg_bounded_fun`.

    Returns
    -------
    vertices : ndarray(float, ndim=2)
        Initial simplex with shape (n+1, n).

    """
    n = x0.size

    vertices = np.empty((n + 1, n), dtype=np.float64)

    # Broadcast x0 on row dimension
    vertices[:] = x0

    nonzdelt = 0.05
    zdelt = 0.00025

    for i in range(n):
        # Perturb the i-th coordinate of vertex i+1: scale nonzero
        # coordinates by 5%, bump zero coordinates to `zdelt` (same rule
        # as SciPy; see [8])
        if vertices[i + 1, i] != 0.:
            vertices[i + 1, i] *= (1 + nonzdelt)
        else:
            vertices[i + 1, i] = zdelt

    return vertices
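# ----------------------------------------------------------------------
# Illustration only (not part of the patch): the simplex the rule above
# produces for a sample starting point. Coordinate 0 of x0 is zero, so
# it is bumped to `zdelt`; coordinate 1 is nonzero, so it is scaled by
# 1 + `nonzdelt`.

import numpy as np

x0 = np.array([0., 2.])
# _initialize_simplex(x0, np.array([[], []]).T) returns
#     [[0.     , 2. ],     vertex 0: x0 itself
#      [0.00025, 2. ],     vertex 1: zero coordinate bumped to 0.00025
#      [0.     , 2.1]]     vertex 2: 2. * 1.05 = 2.1
# ----------------------------------------------------------------------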
@njit
def _check_params(ρ, χ, γ, σ, bounds, n):
    """
    Checks whether the parameters for the Nelder-Mead algorithm are valid.
    JIT-compiled in `nopython` mode using Numba.

    Parameters
    ----------
    ρ : scalar(float)
        Reflection parameter. Must be strictly greater than 0.

    χ : scalar(float)
        Expansion parameter. Must be strictly greater than max(1, ρ).

    γ : scalar(float)
        Contraction parameter. Must be strictly between 0 and 1.

    σ : scalar(float)
        Shrinkage parameter. Must be strictly between 0 and 1.

    bounds : ndarray(float, ndim=2)
        Sequence of (min, max) pairs for each element in x.

    n : scalar(int)
        Number of independent variables.

    """
    if ρ <= 0:
        raise ValueError("ρ must be strictly greater than 0.")
    if χ <= 1:
        raise ValueError("χ must be strictly greater than 1.")
    if χ <= ρ:
        raise ValueError("χ must be strictly greater than ρ.")
    if γ <= 0 or γ >= 1:
        raise ValueError("γ must be strictly between 0 and 1.")
    if σ <= 0 or σ >= 1:
        raise ValueError("σ must be strictly between 0 and 1.")

    if not (bounds.shape == (0, 2) or bounds.shape == (n, 2)):
        raise ValueError("The shape of `bounds` is not valid.")
    if (np.atleast_2d(bounds)[:, 0] > np.atleast_2d(bounds)[:, 1]).any():
        raise ValueError("Lower bounds must not be greater than upper bounds.")


@njit
def _check_bounds(x, bounds):
    """
    Checks whether `x` is within `bounds`. JIT-compiled in `nopython` mode
    using Numba.

    Parameters
    ----------
    x : ndarray(float, ndim=1)
        1-D array with shape (n,) of independent variables.

    bounds : ndarray(float, ndim=2)
        Sequence of (min, max) pairs for each element in x.

    Returns
    -------
    bool
        `True` if `x` is within `bounds`, `False` otherwise.

    """
    if bounds.shape == (0, 2):
        return True
    else:
        return ((np.atleast_2d(bounds)[:, 0] <= x).all() and
                (x <= np.atleast_2d(bounds)[:, 1]).all())


@njit
def _neg_bounded_fun(fun, bounds, x, args=()):
    """
    Wrapper for bounding and taking the negative of `fun` for the
    Nelder-Mead algorithm. JIT-compiled in `nopython` mode using Numba.

    Parameters
    ----------
    fun : callable
        The objective function to be maximized:
        `fun(x, *args) -> float`
        where x is a 1-D array with shape (n,) and args is a tuple of the
        fixed parameters needed to completely specify the function. This
        function must be JIT-compiled in `nopython` mode using Numba.

    bounds : ndarray(float, ndim=2)
        Sequence of (min, max) pairs for each element in x.

    x : ndarray(float, ndim=1)
        1-D array with shape (n,) of independent variables at which `fun` is
        to be evaluated.

    args : tuple, optional
        Extra arguments passed to the objective function.

    Returns
    -------
    scalar
        `-fun(x, *args)` if x is within `bounds`, `np.inf` otherwise.

    """
    if _check_bounds(x, bounds):
        return -fun(x, *args)
    else:
        return np.inf
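# ----------------------------------------------------------------------
# Behaviour sketch (illustration only, not part of the patch): how the
# helpers above reject out-of-bounds points. Because the algorithm
# minimizes the negated objective internally, a +inf value makes an
# out-of-bounds vertex rank worst, pulling the simplex back into the box.

import numpy as np
from numba import njit


@njit
def concave(x):
    # Made-up objective, maximized at the origin
    return -(x ** 2).sum()


bounds = np.array([[-1., 1.]])
# _check_bounds(np.array([0.5]), bounds)              -> True
# _check_bounds(np.array([2.0]), bounds)              -> False
# _neg_bounded_fun(concave, bounds, np.array([0.5]))  -> 0.25
# _neg_bounded_fun(concave, bounds, np.array([2.0]))  -> inf
# ----------------------------------------------------------------------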
diff --git a/quantecon/optimize/tests/__init__.py b/quantecon/optimize/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/quantecon/optimize/tests/test_nelder_mead.py b/quantecon/optimize/tests/test_nelder_mead.py
new file mode 100644
index 000000000..c961a4137
--- /dev/null
+++ b/quantecon/optimize/tests/test_nelder_mead.py
@@ -0,0 +1,409 @@
"""
Tests for `nelder_mead.py`

"""

import numpy as np
from numba import njit
from numpy.testing import assert_allclose
from nose.tools import raises

from quantecon.optimize import nelder_mead
from ..nelder_mead import _nelder_mead_algorithm


@njit
def rosenbrock(x):
    # Rosenbrock (1960)
    f = 100 * (x[1] - x[0] ** 2) ** 2 + (1 - x[0]) ** 2
    return -f


@njit
def powell(x):
    # Powell (1962)
    f = (x[0] + 10 * x[1]) ** 2 + 5 * (x[2] - x[3]) ** 2 + \
        (x[1] - 2 * x[2]) ** 4 + 10 * (x[0] - x[3]) ** 4
    return -f


@njit
def mccormick(x):
    # https://www.sfu.ca/~ssurjano/mccorm.html
    f = np.sin(x[0] + x[1]) + (x[0] - x[1]) ** 2 - 1.5 * x[0] + \
        2.5 * x[1] + 1
    return -f


@njit
def bohachevsky(x):
    # https://www.sfu.ca/~ssurjano/boha.html
    f = x[0] ** 2 + x[1] ** 2 - 0.3 * np.cos(3 * np.pi * x[0]) - \
        0.4 * np.cos(4 * np.pi * x[1]) + 0.7
    return -f


@njit
def easom(x):
    # https://www.sfu.ca/~ssurjano/easom.html
    f = -(np.cos(x[0]) * np.cos(x[1]) * np.exp(-(x[0] - np.pi) ** 2 -
                                               (x[1] - np.pi) ** 2))
    return -f


@njit
def perm_function(x, β):
    # https://www.sfu.ca/~ssurjano/perm0db.html
    d = x.size
    f = 0
    for i in range(1, d+1):
        temp = 0
        for j in range(1, d+1):
            temp += (j + β) * (x[j-1] ** i - 1 / (j ** i))
        f += temp ** 2

    return -f


@njit
def rotated_hyper_ellipsoid(x):
    # https://www.sfu.ca/~ssurjano/rothyp.html
    d = x.size
    f = 0
    for i in range(1, d+1):
        for j in range(i):
            f += x[j] ** 2

    return -f


@njit
def booth(x):
    # https://www.sfu.ca/~ssurjano/booth.html
    f = (x[0] + 2 * x[1] - 7) ** 2 + (2 * x[0] + x[1] - 5) ** 2
    return -f


@njit
def zakharov(x):
    # https://www.sfu.ca/~ssurjano/zakharov.html
    d = x.size
    _range = np.arange(1, d+1)
    f = (x ** 2).sum() + (0.5 * x * _range).sum() ** 2 + \
        (0.5 * x * _range).sum() ** 4
    return -f


@njit
def colville(x):
    # https://www.sfu.ca/~ssurjano/colville.html
    f = 100 * (x[0] ** 2 - x[1]) ** 2 + (x[0] - 1) ** 2 + (x[2] - 1) ** 2 + \
        90 * (x[2] ** 2 - x[3]) ** 2 + 10.1 * \
        ((x[1] - 1) ** 2 + (x[3] - 1) ** 2) + 19.8 * (x[1] - 1) * (x[3] - 1)
    return -f


@njit
def styblinski_tang(x):
    # https://www.sfu.ca/~ssurjano/stybtang.html
    f = 0.5 * (x ** 4 - 16 * x ** 2 + 5 * x).sum()
    return -f


@njit
def goldstein_price(x):
    # https://www.sfu.ca/~ssurjano/goldpr.html
    p1 = (x[0] + x[1] + 1) ** 2
    p2 = 19 - 14 * x[0] + 3 * x[0] ** 2 - 14 * x[1] + 6 * x[0] * x[1] + \
        3 * x[1] ** 2
    p3 = (2 * x[0] - 3 * x[1]) ** 2
    p4 = 18 - 32 * x[0] + 12 * x[0] ** 2 + 48 * x[1] - 36 * x[0] * x[1] + \
        27 * x[1] ** 2

    f = (1 + p1 * p2) * (30 + p3 * p4)
    return -f


@njit
def sum_squared(x):
    return -(x ** 2).sum()


@njit
def f(x):
    # Concave quadratic with unconstrained maximizer at x = -0.5
    return -(x[0] ** 2 + x[0])


@njit
def g(x):
    # Discontinuous piecewise quadratic with global maximizer at x = 1
    if x[0] < 1:
        return -(0.75 * x[0] ** 2 - x[0] + 2)
    else:
        return -(0.5 * x[0] ** 2 - x[0] + 1)


@njit
def h(x):
    # Nondifferentiable at the maximizer (0, 0)
    return -(abs(x[0]) + abs(x[1]))
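# ----------------------------------------------------------------------
# Worked check (illustration only, not part of the patch) for the
# piecewise fixture `g` above, grounding the expected values used by
# `test_discontinuous` below.

left_peak = -(0.75 * (2 / 3) ** 2 - (2 / 3) + 2)   # branch x < 1 peaks at
                                                   # x = 2/3 with value -5/3
right_peak = -(0.5 * 1. ** 2 - 1. + 1.)            # branch x >= 1 peaks at
                                                   # x = 1 with value -0.5
# The global maximum is therefore g(1) = -0.5, attained at x = 1.
# ----------------------------------------------------------------------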
class TestMaximization():
    def test_rosenbrock(self):
        sol = np.array([1., 1.])
        fun = 0.

        x0 = np.array([-2, 1])

        results = nelder_mead(rosenbrock, x0, tol_x=1e-20, tol_f=1e-20)

        assert_allclose(results.x, sol, atol=1e-4)
        assert_allclose(results.fun, fun, atol=1e-4)

    def test_powell(self):
        sol = np.zeros(4)
        fun = 0.

        x0 = np.array([3., -1., 0., 1.])

        results = nelder_mead(powell, x0, tol_x=1e-20, tol_f=1e-20)

        assert_allclose(results.x, sol, atol=1e-4)
        assert_allclose(results.fun, fun, atol=1e-4)

    def test_mccormick(self):
        sol = np.array([-0.54719, -1.54719])
        fun = 1.9133

        x0 = np.array([-1., -1.5])
        bounds = np.array([[-1.5, 4.],
                           [-3., 4.]])

        results = nelder_mead(mccormick, x0, bounds=bounds)

        assert_allclose(results.x, sol, rtol=1e-3)
        assert_allclose(results.fun, fun, rtol=1e-3)

    def test_bohachevsky(self):
        sol = np.array([0., 0.])
        fun = 0.

        # The starting point makes a significant difference here
        x0 = np.array([np.pi, -np.pi])

        results = nelder_mead(bohachevsky, x0)

        assert_allclose(results.x, sol, atol=1e-4)
        assert_allclose(results.fun, fun, atol=1e-4)

    def test_easom(self):
        sol = np.array([np.pi, np.pi])
        fun = 1.

        x0 = np.array([5, -1])

        results = nelder_mead(easom, x0, tol_x=1e-20, tol_f=1e-20)

        assert_allclose(results.x, sol, atol=1e-4)
        assert_allclose(results.fun, fun, atol=1e-4)

    def test_perm_function(self):
        d = 4
        x0 = np.ones(d)
        bounds = np.array([[-float(d), float(d)]] * d)

        sol = np.array([1 / i for i in range(1, d+1)])
        fun = 0.

        results = nelder_mead(perm_function, x0, bounds=bounds, args=(1.,),
                              tol_x=1e-30, tol_f=1e-30)

        assert_allclose(results.x, sol, atol=1e-7)
        assert_allclose(results.fun, fun, atol=1e-7)

    def test_rotated_hyper_ellipsoid(self):
        d = 5
        x0 = np.random.normal(size=d)
        bounds = np.array([[-65.536, 65.536]] * d)

        sol = np.zeros(d)
        fun = 0.

        results = nelder_mead(rotated_hyper_ellipsoid, x0, bounds=bounds,
                              tol_x=1e-30, tol_f=1e-30)

        assert_allclose(results.x, sol, atol=1e-7)
        assert_allclose(results.fun, fun, atol=1e-7)

    def test_booth(self):
        x0 = np.array([0., 0.])

        sol = np.array([1., 3.])
        fun = 0.

        results = nelder_mead(booth, x0, tol_x=1e-20, tol_f=1e-20)

        assert_allclose(results.x, sol, atol=1e-7)
        assert_allclose(results.fun, fun, atol=1e-7)

    def test_zakharov(self):
        x0 = np.array([-3., 8., 1., 3., -0.5])
        bounds = np.array([[-5., 10.]] * 5)

        sol = np.zeros(5)
        fun = 0.

        results = nelder_mead(zakharov, x0, bounds=bounds, tol_f=1e-30,
                              tol_x=1e-30)

        assert_allclose(results.x, sol, atol=1e-7)
        assert_allclose(results.fun, fun, atol=1e-7)

    def test_colville(self):
        x0 = np.array([-3.5, 9., 0.25, -1.])
        bounds = np.array([[-10., 10.]] * 4)

        sol = np.ones(4)
        fun = 0.

        results = nelder_mead(colville, x0, bounds=bounds, tol_f=1e-35,
                              tol_x=1e-35)

        assert_allclose(results.x, sol)
        assert_allclose(results.fun, fun, atol=1e-7)

    def test_styblinski_tang(self):
        d = 8
        x0 = -np.ones(d)
        bounds = np.array([[-5., 5.]] * d)

        sol = np.array([-2.903534] * d)
        fun = 39.16599 * d

        results = nelder_mead(styblinski_tang, x0, bounds=bounds, tol_f=1e-35,
                              tol_x=1e-35)

        assert_allclose(results.x, sol, rtol=1e-4)
        assert_allclose(results.fun, fun, rtol=1e-5)

    def test_goldstein_price(self):
        x0 = np.array([-1.5, 0.5])

        sol = np.array([0., -1.])
        fun = -3.

        results = nelder_mead(goldstein_price, x0)

        assert_allclose(results.x, sol, atol=1e-5)
        assert_allclose(results.fun, fun)

    def test_sum_squared(self):
        x0 = np.array([0.5, -np.pi, np.pi])

        sol = np.zeros(3)
        fun = 0.

        results = nelder_mead(sum_squared, x0, tol_f=1e-50, tol_x=1e-50)
        assert_allclose(results.x, sol, atol=1e-5)
        assert_allclose(results.fun, fun, atol=1e-5)

    def test_corner_sol(self):
        sol = np.array([0.])
        fun = 0.

        x0 = np.array([10.])
        bounds = np.array([[0., np.inf]])

        results = nelder_mead(f, x0, bounds=bounds, tol_f=1e-20)

        assert_allclose(results.x, sol)
        assert_allclose(results.fun, fun)

    def test_discontinuous(self):
        sol = np.array([1.])
        fun = -0.5

        x0 = np.array([-10.])

        results = nelder_mead(g, x0)

        assert_allclose(results.x, sol)
        assert_allclose(results.fun, fun)
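# ----------------------------------------------------------------------
# Worked check (illustration only, not part of the patch) for
# `test_corner_sol` above: the unconstrained maximizer of
# f(x) = -(x^2 + x) is x = -0.5, which the bound x >= 0 rules out.
# Since f'(x) = -(2x + 1) < 0 on [0, inf), f is decreasing there.

# For a concave single-variable objective, the maximizer over [0, inf)
# is the projection of the unconstrained maximizer onto the interval:
corner = max(0., -0.5)   # -> 0.0, matching `sol` in test_corner_sol
# ----------------------------------------------------------------------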
@raises(ValueError)
def test_invalid_bounds_1():
    x0 = np.array([-2., 1.])
    bounds = np.array([[10., -10.], [10., -10.]])
    nelder_mead(rosenbrock, x0, bounds=bounds)


@raises(ValueError)
def test_invalid_bounds_2():
    x0 = np.array([-2., 1.])
    bounds = np.array([[10., -10., 10., -10.]])
    nelder_mead(rosenbrock, x0, bounds=bounds)


@raises(ValueError)
def test_invalid_ρ():
    vertices = np.array([[-2., 1.],
                         [1.05 * -2., 1.],
                         [-2., 1.05 * 1.]])
    invalid_ρ = -1.
    _nelder_mead_algorithm(rosenbrock, vertices, ρ=invalid_ρ)


@raises(ValueError)
def test_invalid_χ():
    vertices = np.array([[-2., 1.],
                         [1.05 * -2., 1.],
                         [-2., 1.05 * 1.]])
    invalid_χ = 0.5
    _nelder_mead_algorithm(rosenbrock, vertices, χ=invalid_χ)


@raises(ValueError)
def test_invalid_ρχ():
    vertices = np.array([[-2., 1.],
                         [1.05 * -2., 1.],
                         [-2., 1.05 * 1.]])
    ρ = 2.
    χ = 1.5
    _nelder_mead_algorithm(rosenbrock, vertices, ρ=ρ, χ=χ)


@raises(ValueError)
def test_invalid_γ():
    vertices = np.array([[-2., 1.],
                         [1.05 * -2., 1.],
                         [-2., 1.05 * 1.]])
    invalid_γ = -1e-7
    _nelder_mead_algorithm(rosenbrock, vertices, γ=invalid_γ)


@raises(ValueError)
def test_invalid_σ():
    vertices = np.array([[-2., 1.],
                         [1.05 * -2., 1.],
                         [-2., 1.05 * 1.]])
    invalid_σ = 1. + 1e-7
    _nelder_mead_algorithm(rosenbrock, vertices, σ=invalid_σ)


if __name__ == '__main__':
    import sys
    import nose

    argv = sys.argv[:]
    argv.append('--verbose')
    argv.append('--nocapture')
    nose.main(argv=argv, defaultTest=__file__)
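# ----------------------------------------------------------------------
# Cross-check sketch (illustration only, not part of the patch): the
# fixtures above can be validated against SciPy, which minimizes, by
# negating the objective that `nelder_mead` maximizes.

import numpy as np
from scipy.optimize import minimize


def neg_rosenbrock(x):
    # Plain-Python negation of the `rosenbrock` fixture
    return 100 * (x[1] - x[0] ** 2) ** 2 + (1 - x[0]) ** 2


res = minimize(neg_rosenbrock, np.array([-2., 1.]), method='Nelder-Mead',
               options={'xatol': 1e-8, 'fatol': 1e-8})
# res.x should be close to [1., 1.], agreeing with `test_rosenbrock`.
# ----------------------------------------------------------------------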