diff --git a/demo.py b/demo.py
index da998b1..44efc15 100644
--- a/demo.py
+++ b/demo.py
@@ -1,10 +1,16 @@
-from optimatic.grad_desc import Optimiser
+from optimatic.optimisers.grad_desc import Optimiser
 import numpy as np
 
+minimum = np.random.normal(scale=5)
+print("Actual minimum is: {}".format(minimum))
+
 def f(x):
-    return (x - 5.4) ** 2
+    return (x - minimum) ** 2
 
 def df(x):
-    return 2 * (x - 5.4)
+    return 2 * (x - minimum)
+
+opt = Optimiser(f, df, np.random.normal(scale=5))
+x = opt.optimise()
 
-opt = Optimiser(f, df, 0.0)
+print("Calculated minimum is: {}".format(x))
diff --git a/docs/source/introduction.rst b/docs/source/introduction.rst
index dadac94..71a5c96 100644
--- a/docs/source/introduction.rst
+++ b/docs/source/introduction.rst
@@ -25,4 +25,4 @@ Then import and initialise the optimiser you want to use, e.g.:
 Then run either :code:`opt.step()` to run one step of the chosen
 optimisation algorithm, or :code:`opt.optimise()` to run until either
 :code:`opt.steps` is exceeded, or :code:`opt.precision` is met. See
-:func:`~optimatic.grad_desc.Optimiser` for more details.
+:func:`~optimatic.optimisers.optimiser.Optimiser` for more details.
diff --git a/docs/source/optimatic.optimisers.rst b/docs/source/optimatic.optimisers.rst
new file mode 100644
index 0000000..cb0aab2
--- /dev/null
+++ b/docs/source/optimatic.optimisers.rst
@@ -0,0 +1,30 @@
+optimatic.optimisers package
+============================
+
+Submodules
+----------
+
+optimatic.optimisers.grad_desc module
+-------------------------------------
+
+.. automodule:: optimatic.optimisers.grad_desc
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+optimatic.optimisers.optimiser module
+-------------------------------------
+
+.. automodule:: optimatic.optimisers.optimiser
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: optimatic.optimisers
+    :members:
+    :undoc-members:
+    :show-inheritance:
diff --git a/docs/source/optimatic.rst b/docs/source/optimatic.rst
index 42d4054..12e2eb6 100644
--- a/docs/source/optimatic.rst
+++ b/docs/source/optimatic.rst
@@ -6,20 +6,9 @@ Subpackages
 
 .. toctree::
 
+    optimatic.optimisers
     optimatic.utils
 
-Submodules
-----------
-
-optimatic.grad_desc module
---------------------------
-
-.. automodule:: optimatic.grad_desc
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-
 Module contents
 ---------------
 
diff --git a/optimatic/optimisers/__init__.py b/optimatic/optimisers/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/optimatic/grad_desc.py b/optimatic/optimisers/grad_desc.py
similarity index 62%
rename from optimatic/grad_desc.py
rename to optimatic/optimisers/grad_desc.py
index 839c100..7ea1ca7 100644
--- a/optimatic/grad_desc.py
+++ b/optimatic/optimisers/grad_desc.py
@@ -11,43 +11,31 @@
     \mathbf{x}_n = \mathbf{x}_{n-1} - \gamma \\nabla f(\mathbf{x}_{n-1})
 """
 import numpy as np
+from optimiser import Optimiser as OptimiserBase
 
 
-class Optimiser(object):
+class Optimiser(OptimiserBase):
     """
     :param y: The function to optimise
    :param dy: The derivative of the function to optimise
     :param x0: The starting position for the algorithm
     :param precision: The precision to calculate the minimum to
-    :param gamma: The starting value for gamma
+    :param gamma: The starting value for :math:`\gamma`
     :param steps: The max number of iterations of the algorithm to run
     """
-    def __init__(self, y, dy, x0, precision=0.0001, gamma=0.1, steps=1000):
-        self.y = y
+    def __init__(self, y, dy, x0, precision=0.0001, gamma=0.1, steps=10000):
+        super(Optimiser, self).__init__(y, x0, precision=precision, steps=steps)
         self.dy = dy
-        self.precision = precision
         self.step_size = x0
-        self.xn = x0
-        self.xn_1 = x0
         self.gamma = gamma
-        self.steps = steps
 
     def step(self):
-        """Runs one iteration of the algorithm"""
         self.xn_1 = self.xn
         self.xn = self.xn_1 - self.gamma * self.dy(self.xn_1)
         grad_diff = self.dy(self.xn) - self.dy(self.xn_1)
+        if grad_diff == 0.0:
+            # Algorithm has converged
+            return
         xs_diff = self.xn - self.xn_1
         self.gamma = np.dot(xs_diff, grad_diff)
         self.gamma /= np.linalg.norm(grad_diff) ** 2
-
-    def optimise(self):
-        """Runs :func:`step` the specified number of times"""
-        i = 0
-        self.step()
-        step_size = np.linalg.norm(self.xn - self.xn_1)
-        while step_size < self.precision and i < self.steps:
-            self.step()
-            step_size = np.linalg.norm(self.xn - self.xn_1)
-            i += 1
-        return self.xn
diff --git a/optimatic/optimisers/optimiser.py b/optimatic/optimisers/optimiser.py
new file mode 100644
index 0000000..9de4bfd
--- /dev/null
+++ b/optimatic/optimisers/optimiser.py
@@ -0,0 +1,39 @@
+"""
+Optimiser base class
+
+All optimiser classes should inherit from this class
+"""
+from abc import ABCMeta, abstractmethod
+import numpy as np
+
+class Optimiser(object):
+    """
+    :param y: The function to optimise
+    :param x0: The starting position for the algorithm
+    :param precision: The precision to calculate the minimum to
+    :param steps: The max number of iterations of the algorithm to run
+    """
+    __metaclass__ = ABCMeta
+
+    def __init__(self, y, x0, precision=1e-4, steps=10000):
+        self.y = y
+        self.xn = x0
+        self.xn_1 = x0
+        self.precision = precision
+        self.steps = steps
+
+    @abstractmethod
+    def step(self):
+        """Runs one iteration of the algorithm"""
+        return
+
+    def optimise(self):
+        """Runs :func:`step` the specified number of times"""
+        i = 0
+        self.step()
+        step_size = np.linalg.norm(self.xn - self.xn_1)
+        while step_size > self.precision and i < self.steps:
+            self.step()
+            step_size = np.linalg.norm(self.xn - self.xn_1)
+            i += 1
+        return self.xn
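Note: the refactoring above moves the shared step()/optimise() machinery into optimatic.optimisers.optimiser.Optimiser, so a new algorithm only needs to subclass it and implement step(). The sketch below is a rough illustration of that contract and is not part of this diff; the class name FixedStepOptimiser, the dy argument and the lr value are invented for the example, while the constructor signature (y, x0, precision, steps) and the step()/optimise() behaviour come from the new base class.

# Hypothetical example (not part of the diff): a fixed-step optimiser built on
# the new base class. FixedStepOptimiser, dy and lr are illustrative names.
import numpy as np

from optimatic.optimisers.optimiser import Optimiser


class FixedStepOptimiser(Optimiser):
    def __init__(self, y, dy, x0, lr=0.1, precision=1e-4, steps=10000):
        super(FixedStepOptimiser, self).__init__(y, x0, precision=precision,
                                                 steps=steps)
        self.dy = dy
        self.lr = lr

    def step(self):
        # Keep the previous point so optimise() can measure the step size
        self.xn_1 = self.xn
        self.xn = self.xn_1 - self.lr * self.dy(self.xn_1)


def f(x):
    return (x - 2.0) ** 2


def df(x):
    return 2 * (x - 2.0)


opt = FixedStepOptimiser(f, df, x0=10.0)
print("Calculated minimum is: {}".format(opt.optimise()))  # approaches 2.0

Because optimise() measures np.linalg.norm(self.xn - self.xn_1) after each step, a subclass only has to keep self.xn and self.xn_1 up to date, exactly as the renamed grad_desc.Optimiser does.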