diff --git a/README.rst b/README.rst
index 5f1264d..0df4823 100644
--- a/README.rst
+++ b/README.rst
@@ -12,7 +12,7 @@ In the M-LOOP source folder.
For more details on how to use the package see the documentation. You can see it online at
-?
+http://m-loop.readthedocs.io/
Or you can build it by entering the docs folder and running:
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
deleted file mode 100644
index eeb6fac..0000000
--- a/doc/source/contributing.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-.. _sec-contributing:
-
-Contributing
-============
-
-
-
-
diff --git a/doc/source/examples.rst b/doc/source/examples.rst
deleted file mode 100644
index 9aa7edb..0000000
--- a/doc/source/examples.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _sec-examples:
-
-Examples
-========
-
-Blah
\ No newline at end of file
diff --git a/doc/source/images/M-LOOP_logo.pdf b/doc/source/images/M-LOOP_logo.pdf
deleted file mode 100644
index bbb7a85..0000000
Binary files a/doc/source/images/M-LOOP_logo.pdf and /dev/null differ
diff --git a/doc/source/images/M-LOOP_logo.png b/doc/source/images/M-LOOP_logo.png
deleted file mode 100644
index bc88a1e..0000000
Binary files a/doc/source/images/M-LOOP_logo.png and /dev/null differ
diff --git a/doc/source/options.rst b/doc/source/options.rst
deleted file mode 100644
index f572fd4..0000000
--- a/doc/source/options.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-.. _sec-options:
-
-Options
--------
-I like turtles 3
\ No newline at end of file
diff --git a/doc/source/visualizations.rst b/doc/source/visualizations.rst
deleted file mode 100644
index 3b2f290..0000000
--- a/doc/source/visualizations.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _sec-visualizations:
-
-Visualizations
-==============
-
-Blah
\ No newline at end of file
diff --git a/doc/Makefile b/docs/Makefile
similarity index 98%
rename from doc/Makefile
rename to docs/Makefile
index 5213de8..e53d317 100644
--- a/doc/Makefile
+++ b/docs/Makefile
@@ -5,7 +5,7 @@
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
-BUILDDIR = build
+BUILDDIR = _build
# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
@@ -15,9 +15,9 @@ endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
-I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help
help:
diff --git a/doc/source/images/M-LOOP_diagram.png b/docs/_static/M-LOOP_diagram.png
similarity index 100%
rename from doc/source/images/M-LOOP_diagram.png
rename to docs/_static/M-LOOP_diagram.png
diff --git a/doc/source/images/M-LOOP_logo.ico b/docs/_static/M-LOOP_logo.ico
similarity index 100%
rename from doc/source/images/M-LOOP_logo.ico
rename to docs/_static/M-LOOP_logo.ico
diff --git a/docs/_static/M-LOOP_logo.png b/docs/_static/M-LOOP_logo.png
new file mode 100644
index 0000000..172aece
Binary files /dev/null and b/docs/_static/M-LOOP_logo.png differ
diff --git a/docs/_static/M-LOOP_visualizations.png b/docs/_static/M-LOOP_visualizations.png
new file mode 100644
index 0000000..6e4c45e
Binary files /dev/null and b/docs/_static/M-LOOP_visualizations.png differ
diff --git a/doc/source/images/M-LOOPandBEC.png b/docs/_static/M-LOOPandBEC.png
similarity index 100%
rename from doc/source/images/M-LOOPandBEC.png
rename to docs/_static/M-LOOPandBEC.png
diff --git a/doc/source/api/controllers.rst b/docs/api/controllers.rst
similarity index 100%
rename from doc/source/api/controllers.rst
rename to docs/api/controllers.rst
diff --git a/doc/source/api/index.rst b/docs/api/index.rst
similarity index 100%
rename from doc/source/api/index.rst
rename to docs/api/index.rst
diff --git a/doc/source/api/interfaces.rst b/docs/api/interfaces.rst
similarity index 100%
rename from doc/source/api/interfaces.rst
rename to docs/api/interfaces.rst
diff --git a/doc/source/api/launchers.rst b/docs/api/launchers.rst
similarity index 100%
rename from doc/source/api/launchers.rst
rename to docs/api/launchers.rst
diff --git a/doc/source/api/learners.rst b/docs/api/learners.rst
similarity index 100%
rename from doc/source/api/learners.rst
rename to docs/api/learners.rst
diff --git a/doc/source/api/mloop.rst b/docs/api/mloop.rst
similarity index 100%
rename from doc/source/api/mloop.rst
rename to docs/api/mloop.rst
diff --git a/doc/source/api/t_esting.rst b/docs/api/t_esting.rst
similarity index 100%
rename from doc/source/api/t_esting.rst
rename to docs/api/t_esting.rst
diff --git a/doc/source/api/utilities.rst b/docs/api/utilities.rst
similarity index 100%
rename from doc/source/api/utilities.rst
rename to docs/api/utilities.rst
diff --git a/doc/source/api/visualizations.rst b/docs/api/visualizations.rst
similarity index 100%
rename from doc/source/api/visualizations.rst
rename to docs/api/visualizations.rst
diff --git a/doc/source/conf.py b/docs/conf.py
similarity index 93%
rename from doc/source/conf.py
rename to docs/conf.py
index 051ed94..0a82809 100644
--- a/doc/source/conf.py
+++ b/docs/conf.py
@@ -15,6 +15,7 @@
import sys
import os
+import mloop
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
@@ -70,9 +71,9 @@
# built documents.
#
# The short X.Y version.
-version = '2.0'
+version = mloop.__version__
# The full version, including alpha/beta/rc tags.
-release = '2.0.1'
+release = mloop.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -127,10 +128,25 @@
# a list of builtin themes.
html_theme = 'alabaster'
+# Custom sidebar templates, maps document names to template names.
+html_sidebars = { '**': ['about.html','navigation.html','relations.html', 'searchbox.html'], }
+
+#'globaltoc.html',
+
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
-#html_theme_options = {}
+html_theme_options = {'logo':'M-LOOP_logo.png',
+'logo_name':True,
+'description':'Machine-Learning Online Optimization Package',
+'github_user':'michaelhush',
+'github_repo':'M-LOOP',
+'github_banner':True,
+'font_family':"Arial, Helvetica, sans-serif",
+'head_font_family':"Arial, Helvetica, sans-serif",
+'analytics_id':'UA-83520804-1'}
+
+#'github_button':True,
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
@@ -144,12 +160,12 @@
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
-html_logo = 'images/M-LOOP_logo.png'
+#html_logo = '_static/M-LOOP_logo.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
-html_favicon = 'images/M-LOOP_logo.ico'
+html_favicon = '_static/M-LOOP_logo.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
@@ -170,9 +186,6 @@
# typographically correct entities.
#html_use_smartypants = True
-# Custom sidebar templates, maps document names to template names.
-html_sidebars = { '**': ['globaltoc.html', 'relations.html', 'searchbox.html'], }
-
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
@@ -247,7 +260,7 @@
# The name of an image file (relative to this directory) to place at the top of
# the title page.
-latex_logo = 'images/M-LOOP_logo.pdf'
+#latex_logo = 'M-LOOP_logo.pdf'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
diff --git a/docs/contributing.rst b/docs/contributing.rst
new file mode 100644
index 0000000..e7f98a2
--- /dev/null
+++ b/docs/contributing.rst
@@ -0,0 +1,37 @@
+.. _sec-contributing:
+
+Contributing
+============
+
+If you use M-LOOP please consider contributing to the project. There are many quick and easy ways to help out.
+
+- If you use M-LOOP be sure to cite the paper where it was first used: `'Fast machine-learning online optimization of ultra-cold-atom experiments', Sci Rep 6, 25890 (2016) <http://www.nature.com/articles/srep25890>`_.
+- Star and watch the `M-LOOP github <https://github.com/michaelhush/M-LOOP>`_.
+- Make a suggestion on what features you would like added, or report an issue, on the `github <https://github.com/michaelhush/M-LOOP/issues>`_ or by email.
+- Contribute your own code to the `M-LOOP github <https://github.com/michaelhush/M-LOOP>`_, this could be the interface you designed, more options or a completely new solver.
+
+Finally spread the word! Let others know the success you have had with M-LOOP and recommend they try it too.
+
+Contributors
+------------
+
+M-LOOP is written and maintained by `Michael R Hush `_
+
+Other contributors, listed alphabetically, are:
+
+* John W. Bastian - design, first demonstration
+* Patrick J. Everitt - testing, design, first demonstration
+* Kyle S. Hardman - design, first demonstration
+* Anton van den Hengel - design, first demonstration
+* Joe J. Hope - design, first demonstration
+* Carlos C. N. Kuhn - first demonstration
+* Andre N. Luiten - first demonstration
+* Gordon D. McDonald - first demonstration
+* Manju Perumbil - first demonstration
+* Ian R. Petersen - first demonstration
+* Ciaron D. Quinlivan - first demonstration
+* Alex Ratcliff - testing
+* Nick P. Robins - first demonstration
+* Mahasen A. Sooriyabandara - first demonstration
+* Richard Taylor - testing
+* Paul B. Wigley - testing, design, first demonstration
diff --git a/doc/source/data.rst b/docs/data.rst
similarity index 100%
rename from doc/source/data.rst
rename to docs/data.rst
diff --git a/docs/examples.rst b/docs/examples.rst
new file mode 100644
index 0000000..8ec9258
--- /dev/null
+++ b/docs/examples.rst
@@ -0,0 +1,102 @@
+.. _sec-examples:
+
+Examples
+========
+
+M-LOOP includes a series of example configuration files for each of the controllers and interfaces. The examples can be found in the examples folder. For some controllers there are two files: ones ending with *_simple_config*, which include the standard configuration options, and ones ending with *_complete_config*, which include a comprehensive list of all the configuration options available.
+
+The options available are also comprehensively documented in the :ref:`sec-api` as keywords for each of the classes. However, the quickest and easiest way to learn what options are available, if you are not familiar with python, is to just look at the provided examples.
+
+Each of the example files is used when running tests of M-LOOP. So please copy and modify them elsewhere if you use them as a starting point for your configuration file.
+
+Interfaces
+----------
+
+There is currently one interface supported: 'file'. You can specify which interface you want with the option::
+
+ interface_type = [name]
+
+The default will be 'file'. The specific options for each of the interfaces are described below.
+
+File Interface
+~~~~~~~~~~~~~~
+
+You can change the names of the files used for the file interface and their type. The file interface options are described in *file_interface_config.txt*.
+
+.. include:: ../examples/file_interface_config.txt
+ :literal:
+
+Controllers
+-----------
+
+There are currently three controller types supported: 'gaussian_process', 'random' and 'nelder_mead'. The default is 'gaussian_process'. You can set which controller you want to use with the option::
+
+ controller_type = [name]
+
+Each of the controllers and their specific options are described below. There is also a set of common options shared by all controllers which is described in *controller_config.txt*. The common options include the parameter settings and the halting conditions.
+
+.. include:: ../examples/controller_config.txt
+ :literal:
+
+Gaussian Process
+~~~~~~~~~~~~~~~~
+
+The Gaussian-process controller is the default controller and is currently the most sophisticated machine-learning algorithm. It uses a `Gaussian process <https://en.wikipedia.org/wiki/Gaussian_process>`_ to develop a model for how the parameters relate to the measured cost, effectively creating a model for how the experiment operates. This model is then used when picking new points to test.
+
+There are two example files for the Gaussian-process controller: *gaussian_process_simple_config.txt* which contains the basic options.
+
+.. include:: ../examples/gaussian_process_simple_config.txt
+ :literal:
+
+*gaussian_process_complete_config.txt* which contains a comprehensive list of options.
+
+.. include:: ../examples/gaussian_process_complete_config.txt
+ :literal:
+
+Nelder Mead
+~~~~~~~~~~~
+
+The Nelder Mead controller implements the `Nelder-Mead method <https://en.wikipedia.org/wiki/Nelder%E2%80%93Mead_method>`_ for optimization. You can control the starting point and size of the initial simplex of the method with the configuration file.
+
+There are two example files for the Nelder-Mead controller: *nelder_mead_simple_config.txt* which contains the basic options.
+
+.. include:: ../examples/nelder_mead_simple_config.txt
+ :literal:
+
+*nelder_mead_complete_config.txt* which contains a comprehensive list of options.
+
+.. include:: ../examples/nelder_mead_complete_config.txt
+ :literal:
+
+Random
+~~~~~~
+
+The random optimization algorithm picks parameters randomly from a uniform distribution from within the parameter bounds or trust region.
+
+There are two example files for the random controller: *random_simple_config.txt* which contains the basic options.
+
+.. include:: ../examples/random_simple_config.txt
+ :literal:
+
+*random_complete_config.txt* which contains a comprehensive list of options.
+
+.. include:: ../examples/random_complete_config.txt
+ :literal:
+
+Logging
+-------
+
+You can control the filename of the logs and also the level which is reported to the file and the console. For more information see `logging levels <https://docs.python.org/3/library/logging.html#logging-levels>`_. The logging options are described in *logging_config.txt*.
+
+.. include:: ../examples/logging_config.txt
+ :literal:
+
+Extras
+------
+
+Extras refers to options related to post processing your data once the optimization is complete. Currently the only extra option is for visualizations. The extra options are described in *extras_config.txt*.
+
+.. include:: ../examples/extras_config.txt
+ :literal:
+
+
diff --git a/doc/source/index.rst b/docs/index.rst
similarity index 85%
rename from doc/source/index.rst
rename to docs/index.rst
index 84d87c0..aa76525 100644
--- a/doc/source/index.rst
+++ b/docs/index.rst
@@ -4,12 +4,12 @@ M-LOOP
The Machine-Learner Online Optimization Package is designed to automatically and rapidly optimize the parameters of a scientific experiment or computer controller system.
-.. figure:: images/M-LOOPandBEC.png
+.. figure:: _static/M-LOOPandBEC.png
:alt: M-LOOP optimizing a BEC.
- M-LOOP in control of a ultra-cold atom experiment. M-LOOP was able to find an optimal set of ramps to evaporatively cool a thermal gas and form a Bose-Einstein Condensate.
+ M-LOOP in control of an ultra-cold atom experiment. M-LOOP was able to find an optimal set of ramps to evaporatively cool a thermal gas and form a Bose-Einstein Condensate.
-Using M-LOOP is simple, once the parameters of your experiment is computer controller, all you need to do is determine a cost function that quantifies the performance of an experiment after a single run. You can then hand over control of the experiment to M-LOOP which will find a global optimal set of parameters that minimize the cost function, by performing a few experiments and testing different parameters. M-LOOP uses machine-learning to predict how the parameters of the experiment relate to the cost, it uses this model to pick the next best parameters to test to find an optimum as quickly as possible.
+Using M-LOOP is simple, once the parameters of your experiment are computer controlled, all you need to do is determine a cost function that quantifies the performance of an experiment after a single run. You can then hand over control of the experiment to M-LOOP which will find a global optimal set of parameters that minimize the cost function, by performing a few experiments and testing different parameters. M-LOOP uses machine-learning to predict how the parameters of the experiment relate to the cost; it uses this model to pick the next best parameters to test to find an optimum as quickly as possible.
M-LOOP not only finds an optimal set of parameters for the experiment it also provides a model of how the parameters are related to the costs which can be used to improve the experiment.
@@ -35,7 +35,6 @@ Contents
interfaces
data
visualizations
- options
examples
contributing
api/index
diff --git a/doc/source/install.rst b/docs/install.rst
similarity index 97%
rename from doc/source/install.rst
rename to docs/install.rst
index e3f4497..55a1e00 100644
--- a/doc/source/install.rst
+++ b/docs/install.rst
@@ -75,9 +75,9 @@ In the M-LOOP source code directory. The tests should take around five minutes t
Documentation
-------------
-If you would also like a local copy of the documentation enter the doc folder and use the command::
+If you would also like a local copy of the documentation enter the docs folder and use the command::
make html
-Which will generate the documentation in doc/build/html.
+Which will generate the documentation in docs/_build/html.
diff --git a/doc/source/interfaces.rst b/docs/interfaces.rst
similarity index 100%
rename from doc/source/interfaces.rst
rename to docs/interfaces.rst
diff --git a/doc/make.bat b/docs/make.bat
similarity index 94%
rename from doc/make.bat
rename to docs/make.bat
index e67a860..a2d3c82 100644
--- a/doc/make.bat
+++ b/docs/make.bat
@@ -5,9 +5,9 @@ REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
-set BUILDDIR=build
-set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source
-set I18NSPHINXOPTS=%SPHINXOPTS% source
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
if NOT "%PAPER%" == "" (
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
diff --git a/doc/source/tutorial.rst b/docs/tutorial.rst
similarity index 93%
rename from doc/source/tutorial.rst
rename to docs/tutorial.rst
index 3664a00..8df958c 100644
--- a/doc/source/tutorial.rst
+++ b/docs/tutorial.rst
@@ -3,7 +3,7 @@
Tutorial
========
-Here we provide a tutorial on how to use M-LOOP to optimize a generic experiment. M-LOOP is flexible and can be customized with a variety of :ref:`sec-options` and :ref:`sec-interfaces`, it can be run from the command line or used as a :ref:`python API `. Here we introduce the basic settings to get M-LOOP up and running as quick as possible.
+Here we provide a tutorial on how to use M-LOOP to optimize a generic experiment. M-LOOP is flexible and can be customized with a variety of :ref:`options ` and :ref:`sec-interfaces`, it can be run from the command line or used as a :ref:`python API `. Here we introduce the basic settings to get M-LOOP up and running as quick as possible.
Overview
--------
@@ -12,7 +12,7 @@ The basic operation of M-LOOP is sketched below.
.. _fig-mloop-diag:
-.. figure:: images/M-LOOP_diagram.png
+.. figure:: _static/M-LOOP_diagram.png
:alt: M-LOOP in a loop with an experiment sending parameters and receiving costs.
There are three stages:
@@ -47,7 +47,7 @@ The configuration file contains a list of options and settings for the optimizat
[keyword] = [value]
-You can add comments to your file using #, everything past # will be ignored. Examples of relevant keywords and syntax for the values is provided in :ref:`sec-examples` and a comprehensive list of options is described in :ref:`sec-options`. The values should be formatted with python syntax, strings should be surrounded with single or double quotes and arrays of values can be surrounded with square brackets/parentheses with numbers separated with commas. In this tutorial we will examine the example file *tutoral_config.txt*::
+You can add comments to your file using #, everything past # will be ignored. Examples of relevant keywords and syntax for the values, along with a comprehensive list of options, are provided in :ref:`sec-examples`. The values should be formatted with python syntax, strings should be surrounded with single or double quotes and arrays of values can be surrounded with square brackets/parentheses with numbers separated with commas. In this tutorial we will examine the example file *tutoral_config.txt*::
#Tutorial Config
#---------------
@@ -110,7 +110,7 @@ If you do not want one of the halting conditions, simply delete it from your fil
Learner specific options
~~~~~~~~~~~~~~~~~~~~~~~~
-There are many learner specific options (and different learner algorithms) described in :ref:`sec-options`. Here we consider just a couple of the most commonly used ones. M-LOOP has been designed to find an optimum quickly with no custom configuration as long as the experiment is able to provide a cost for every parameter it provides.
+There are many learner specific options (and different learner algorithms) described in :ref:`sec-examples`. Here we consider just a couple of the most commonly used ones. M-LOOP has been designed to find an optimum quickly with no custom configuration as long as the experiment is able to provide a cost for every parameter it provides.
However if your experiment will fail to work if there are sudden and significant changes to your parameters you may need to set the following options::
diff --git a/docs/visualizations.rst b/docs/visualizations.rst
new file mode 100644
index 0000000..fd2c1ce
--- /dev/null
+++ b/docs/visualizations.rst
@@ -0,0 +1,47 @@
+.. _sec-visualizations:
+
+Visualizations
+==============
+
+At the end of an optimization run a set of visualizations will be produced by default.
+
+.. figure:: _static/M-LOOP_visualizations.png
+ :alt: Six visualizations of data produced by M-LOOP.
+
+ An example of the six visualizations automatically produced when M-LOOP is run with the default controller, the Gaussian process machine learner.
+
+The number of visualizations will depend on what controller you use. By default there should be six which are described below:
+
+- **Controller: Cost vs run number.** Here the cost returned by the experiment is plotted against the run number. The legend shows what algorithm was used to generate the parameters tested by the experiment. If you use the Gaussian process, there will also be another algorithm used throughout the optimization run in order to (a) ensure parameters are generated fast enough and (b) add new prior-free data to ensure the Gaussian process converges to the correct model.
+
+- **Controller: Parameters vs run number.** The parameter values are all plotted against the run number. Note the parameters will all be scaled between their minimum and maximum value. The legend indicates what color corresponds to what parameter.
+
+- **Controller: Cost vs parameters.** The cost versus the parameters. Here each of the parameters tested are plotted against the cost they returned as a set. Again the parameter values are all scaled between their minimum and maximum values.
+
+- **GP Learner: Predicted landscape.** 1D cross sections of the landscape about the best recorded cost are plotted against each parameter. The color of the cross section corresponds to the parameter that is varied in the cross section. This predicted landscape is generated by the model fit to the experiment by the Gaussian process. Be sure to check after an optimization run that all parameters contributed. If one parameter produces a flat cross section, it is most likely it did not have any influence on the final cost. You may want to remove it on the next optimization run.
+
+- **GP Learner: Log of length scales vs fit number.** The Gaussian process fits a correlation length to each of the parameters in the experiment. Here we see a plot of the correlation lengths versus fit number. The last correlation lengths (highest fit number) are the most reliable values. Correlation lengths indicate how sensitive the cost is to changes in these parameters. If the correlation length is large, the parameter has very little influence on the cost; if the correlation length is small, the parameter will have a very large influence on the cost. The correlation lengths are not precisely estimated. They should only be trusted as accurate to +/- an order of magnitude. If a parameter has an extremely large value at the end of the optimization, say 5 or more, it is unlikely to have much effect on the cost and should be removed on the next optimization run.
+
+- **GP Learner: Noise level vs fit number.** This is the estimated noise in the costs as a function of fit number. The most reliable estimate of the noise level will be the last value (highest fit number). The noise level is useful for quantifying the intrinsic noise and uncertainty in your cost value. Most other optimization algorithms will not provide this estimate. The noise level estimate may be helpful when isolating what part of your system can be optimized and what part is due to random fluctuations.
+
+The plots which start with *Controller:* are generated from the controller archive, while plots that start with *Learner:* are generated from the learner archive.
+
+Reproducing visualizations
+--------------------------
+
+If you have a controller and learner archive and would like to examine the visualizations again, it is best to do so using the :ref:`sec-api`. For example the following code will plot the visualizations again from the files *controller_archive_2016-08-23_13-59.mat* and *learner_archive_2016-08-18_12-18.pkl*::
+
+ import mloop.visualizations as mlv
+ import matplotlib.pyplot as plt
+
+ mlv.configure_plots()
+ mlv.create_contoller_visualizations('controller_archive_2016-08-23_13-59.mat',file_type='mat')
+ mlv.create_gaussian_process_learner_visualizations('learner_archive_2016-08-18_12-18.pkl',file_type='pkl')
+
+ plt.show()
+
+
+
+
+
+
diff --git a/examples/complete_controller_config.txt b/examples/complete_controller_config.txt
deleted file mode 100644
index 4e167a1..0000000
--- a/examples/complete_controller_config.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-#Controller Options
-#-----------------
-
-#General options
-max_num_runs = 1000 #number of planned runs
-target_cost = 0.1 #cost to beat
-max_num_runs_without_better_params = 10 #max allowed number of runs between finding better parameters
-controller_archive_filename = 'test' #filename prefix for controller archive
-controller_archive_file_type = 'mat' #file_type for controller archive
-archive_extra_dict = {'test':'this_is'} #dictionary of any data to be put in archive
diff --git a/examples/controller_config.txt b/examples/controller_config.txt
new file mode 100644
index 0000000..2c226dc
--- /dev/null
+++ b/examples/controller_config.txt
@@ -0,0 +1,19 @@
+#General Controller Options
+#--------------------------
+
+#Halting conditions
+max_num_runs = 1000 #number of planned runs
+target_cost = 0.1 #cost to beat
+max_num_runs_without_better_params = 100 #max allowed number of runs between finding better parameters
+
+#Parameter controls
+num_params = 2 #Number of parameters
+min_boundary = [0,0] #Minimum value for each parameter
+max_boundary = [2,2] #Maximum value for each parameter
+
+#Filename related
+controller_archive_filename = 'agogo' #filename prefix for controller archive
+controller_archive_file_type = 'mat' #file_type for controller archive
+learner_archive_filename = 'ogoga' #filename prefix for learner archive
+learner_archive_file_type = 'pkl' #file_type for learner archive
+archive_extra_dict = {'test':'this_is'} #dictionary of any extra data to be put in archive
\ No newline at end of file
diff --git a/examples/complete_extras_config.txt b/examples/extras_config.txt
similarity index 54%
rename from examples/complete_extras_config.txt
rename to examples/extras_config.txt
index f85f8b7..7a3ff48 100644
--- a/examples/complete_extras_config.txt
+++ b/examples/extras_config.txt
@@ -1,3 +1,4 @@
-#Extras
-#-----
+#Extra Options
+#-------------
+
visualizations=False #whether plots should be presented after run
\ No newline at end of file
diff --git a/examples/file_interface_config.txt b/examples/file_interface_config.txt
new file mode 100644
index 0000000..9412123
--- /dev/null
+++ b/examples/file_interface_config.txt
@@ -0,0 +1,7 @@
+#File Interface Options
+#----------------------
+
+interface_type = 'file' #The type of interface
+interface_out_filename = 'exp_input' #The filename of the file output by the interface and input into the experiment
+interface_in_filename = 'exp_output' #The filename of the file input into the interface and output by the experiment
+interface_file_type = 'txt' #The file_type of both the input and output files, can be 'txt', 'pkl' or 'mat'.
diff --git a/examples/complete_gaussian_process_config.txt b/examples/gaussian_process_complete_config.txt
similarity index 95%
rename from examples/complete_gaussian_process_config.txt
rename to examples/gaussian_process_complete_config.txt
index aa5b9ef..a4bf1eb 100644
--- a/examples/complete_gaussian_process_config.txt
+++ b/examples/gaussian_process_complete_config.txt
@@ -1,5 +1,5 @@
-#Controller Options
-#------------------
+#Gaussian Process Complete Options
+#---------------------------------
#General options
max_num_runs = 100 #number of planned runs
diff --git a/examples/simple_gaussian_process_config.txt b/examples/gaussian_process_simple_config.txt
similarity index 89%
rename from examples/simple_gaussian_process_config.txt
rename to examples/gaussian_process_simple_config.txt
index 0fbb7de..a5f0c62 100644
--- a/examples/simple_gaussian_process_config.txt
+++ b/examples/gaussian_process_simple_config.txt
@@ -1,5 +1,5 @@
-#Controller Options
-#-----------------
+#Gaussian Process Basic Options
+#------------------------------
#General options
max_num_runs = 100 #number of planned runs
diff --git a/examples/complete_logging_config.txt b/examples/logging_config.txt
similarity index 100%
rename from examples/complete_logging_config.txt
rename to examples/logging_config.txt
diff --git a/examples/complete_nelder_mead_config.txt b/examples/nelder_mead_complete_config.txt
similarity index 91%
rename from examples/complete_nelder_mead_config.txt
rename to examples/nelder_mead_complete_config.txt
index a8269b5..26243a3 100644
--- a/examples/complete_nelder_mead_config.txt
+++ b/examples/nelder_mead_complete_config.txt
@@ -1,5 +1,5 @@
-#Controller Options
-#-----------------
+#Nelder-Mead Complete Options
+#----------------------------
#General options
max_num_runs = 100 #number of planned runs
diff --git a/examples/simple_nelder_mead_config.txt b/examples/nelder_mead_simple_config.txt
similarity index 88%
rename from examples/simple_nelder_mead_config.txt
rename to examples/nelder_mead_simple_config.txt
index 24b3738..b038981 100644
--- a/examples/simple_nelder_mead_config.txt
+++ b/examples/nelder_mead_simple_config.txt
@@ -1,5 +1,5 @@
-#Controller Options
-#-----------------
+#Nelder-Mead Basic Options
+#-------------------------
#General options
max_num_runs = 100 #number of planned runs
diff --git a/examples/complete_random_config.txt b/examples/random_complete_config.txt
similarity index 89%
rename from examples/complete_random_config.txt
rename to examples/random_complete_config.txt
index 64b80a8..f6f5889 100644
--- a/examples/complete_random_config.txt
+++ b/examples/random_complete_config.txt
@@ -1,5 +1,5 @@
-#Controller Options
-#------------------
+#Random Complete Options
+#-----------------------
#General options
max_num_runs = 20 #number of planned runs
diff --git a/examples/simple_random_config.txt b/examples/random_simple_config.txt
similarity index 90%
rename from examples/simple_random_config.txt
rename to examples/random_simple_config.txt
index df94ba5..9e9d85a 100644
--- a/examples/simple_random_config.txt
+++ b/examples/random_simple_config.txt
@@ -1,5 +1,5 @@
-#Controller Options
-#-----------------
+#Random Basic Options
+#--------------------
#General options
max_num_runs = 10 #number of planned runs
diff --git a/mloop/__init__.py b/mloop/__init__.py
index 1f34358..4cb1b1e 100644
--- a/mloop/__init__.py
+++ b/mloop/__init__.py
@@ -10,7 +10,7 @@
import os
-__version__= "2.0.1"
+__version__= "2.0.2"
__all__ = ['controllers','interfaces','launchers','learners','testing','utilities','visualizations']
#Add a null handler in case the user does not run config_logger() before running the optimization
diff --git a/mloop/controllers.py b/mloop/controllers.py
index 0e74707..58e4808 100644
--- a/mloop/controllers.py
+++ b/mloop/controllers.py
@@ -257,8 +257,8 @@ def _put_params_and_out_dict(self, params, param_type=None, **kwargs):
self.out_extras.append(kwargs)
if param_type is not None:
self.out_type.append(param_type)
- self.log.debug('Controller params=' + repr(params))
- self.log.debug('Put params num:' + repr(self.num_out_params ))
+ self.log.info('params ' + str(params))
+ #self.log.debug('Put params num:' + repr(self.num_out_params ))
def _get_cost_and_in_dict(self):
'''
@@ -302,8 +302,11 @@ def _get_cost_and_in_dict(self):
self.best_index = self.num_in_costs
self.best_params = self.curr_params
self.num_last_best_cost = 0
- self.log.debug('Controller cost=' + repr(self.curr_cost))
- self.log.debug('Got cost num:' + repr(self.num_in_costs))
+ if self.curr_bad:
+ self.log.info('bad run')
+ else:
+ self.log.info('cost ' + str(self.curr_cost) + ' +/- ' + str(self.curr_uncer))
+ #self.log.debug('Got cost num:' + repr(self.num_in_costs))
def save_archive(self):
'''
@@ -384,13 +387,13 @@ def print_results(self):
'''
Print results from optimization run to the logs
'''
- self.log.debug('Optimization ended because:')
+ self.log.info('Optimization ended because:-')
if self.num_in_costs >= self.max_num_runs:
- self.log.debug('Maximum number of runs reached.')
+ self.log.info('Maximum number of runs reached.')
if self.best_cost <= self.target_cost:
- self.log.debug('Target cost reached.')
+ self.log.info('Target cost reached.')
if self.num_last_best_cost >= self.max_num_runs_without_better_params:
- self.log.debug('Maximum number of runs without better params reached.')
+ self.log.info('Maximum number of runs without better params reached.')
self.log.info('Results:-')
self.log.info('Best parameters found:' + str(self.best_params))
self.log.info('Best cost returned:' + str(self.best_cost) + ' +/- ' + str(self.best_uncer))
@@ -402,15 +405,15 @@ def _optimization_routine(self):
'''
self.log.debug('Start controller loop.')
try:
+ self.log.info('Run:' + str(self.num_in_costs +1))
next_params = self._first_params()
self._put_params_and_out_dict(next_params)
- self.log.info('Run:' + str(self.num_in_costs +1))
self.save_archive()
self._get_cost_and_in_dict()
while self.check_end_conditions():
+ self.log.info('Run:' + str(self.num_in_costs +1))
next_params = self._next_params()
self._put_params_and_out_dict(next_params)
- self.log.info('Run:' + str(self.num_in_costs +1))
self.save_archive()
self._get_cost_and_in_dict()
self.log.debug('End controller loop.')
@@ -524,7 +527,8 @@ def __init__(self, interface,
min_boundary=None,
max_boundary=None,
trust_region=None,
- learner_archive_filename = 'learner_archive',
+ learner_archive_filename = mll.default_learner_archive_filename,
+ learner_archive_file_type = mll.default_learner_archive_file_type,
**kwargs):
super().__init__(interface, **kwargs)
@@ -552,6 +556,7 @@ def __init__(self, interface,
max_boundary=max_boundary,
trust_region=trust_region,
learner_archive_filename=None,
+ learner_archive_file_type=learner_archive_file_type,
**self.remaining_kwargs)
elif self.training_type == 'nelder_mead':
@@ -560,6 +565,7 @@ def __init__(self, interface,
min_boundary=min_boundary,
max_boundary=max_boundary,
learner_archive_filename='training_learner_archive',
+ learner_archive_file_type=learner_archive_file_type,
**self.remaining_kwargs)
else:
self.log.error('Unknown training type provided to Gaussian process controller:' + repr(training_type))
@@ -573,6 +579,7 @@ def __init__(self, interface,
max_boundary=max_boundary,
trust_region=trust_region,
learner_archive_filename=learner_archive_filename,
+ learner_archive_file_type=learner_archive_file_type,
**self.remaining_kwargs)
self.gp_learner_params_queue = self.gp_learner.params_out_queue
@@ -636,6 +643,7 @@ def _optimization_routine(self):
super()._optimization_routine()
#Start last training run
+ self.log.info('Run:' + str(self.num_in_costs +1))
next_params = self._next_params()
self._put_params_and_out_dict(next_params)
@@ -644,13 +652,13 @@ def _optimization_routine(self):
self.log.debug('Starting GP optimization.')
self.new_params_event.set()
- self.log.info('Run:' + str(self.num_in_costs +1))
self.save_archive()
self._get_cost_and_in_dict()
gp_consec = 0
gp_count = 0
while self.check_end_conditions():
+ self.log.info('Run:' + str(self.num_in_costs +1))
if gp_consec==self.generation_num or (self.no_delay and self.gp_learner_params_queue.empty()):
next_params = self._next_params()
self._put_params_and_out_dict(next_params)
@@ -664,7 +672,6 @@ def _optimization_routine(self):
if gp_count%self.generation_num == 2:
self.new_params_event.set()
- self.log.info('Run:' + str(self.num_in_costs +1))
self.save_archive()
self._get_cost_and_in_dict()
diff --git a/mloop/learners.py b/mloop/learners.py
index da3262c..7c3d016 100644
--- a/mloop/learners.py
+++ b/mloop/learners.py
@@ -18,7 +18,7 @@
import sklearn.preprocessing as skp
learner_thread_count = 0
-default_learner_filename = 'learner_archive'
+default_learner_archive_filename = 'learner_archive'
default_learner_archive_file_type = 'txt'
class LearnerInterrupt(Exception):
@@ -58,7 +58,7 @@ def __init__(self,
num_params=None,
min_boundary=None,
max_boundary=None,
- learner_archive_filename=default_learner_filename,
+ learner_archive_filename=default_learner_archive_filename,
learner_archive_file_type=default_learner_archive_file_type,
start_datetime=None,
**kwargs):
@@ -182,15 +182,15 @@ def put_params_and_get_cost(self, params, **kwargs):
Returns:
cost from the cost queue
'''
- self.log.debug('Learner params='+repr(params))
+ #self.log.debug('Learner params='+repr(params))
if not self.check_num_params(params):
self.log.error('Incorrect number of parameters sent to queue.Params' + repr(params))
raise ValueError
if not self.check_in_boundary(params):
self.log.warning('Parameters sent to queue are not within boundaries. Params:' + repr(params))
- self.log.debug('Learner puts params.')
+ #self.log.debug('Learner puts params.')
self.params_out_queue.put(params)
- self.log.debug('Learner waiting for costs.')
+ #self.log.debug('Learner waiting for costs.')
self.save_archive()
while not self.end_event.is_set():
try:
@@ -202,7 +202,7 @@ def put_params_and_get_cost(self, params, **kwargs):
else:
self.log.debug('Learner end signal received. Ending')
raise LearnerInterrupt
- self.log.debug('Learner cost='+repr(cost))
+ #self.log.debug('Learner cost='+repr(cost))
return cost
def save_archive(self):
@@ -212,8 +212,6 @@ def save_archive(self):
self.update_archive()
if self.learner_archive_filename is not None:
mlu.save_dict_to_file(self.archive_dict, self.total_archive_filename, self.learner_archive_file_type)
- else:
- self.log.debug('Did not save archive file.')
def update_archive(self):
'''
@@ -1042,10 +1040,10 @@ def run(self):
'''
try:
while not self.end_event.is_set():
- self.log.debug('Learner waiting for new params event')
+ #self.log.debug('Learner waiting for new params event')
self.save_archive()
self.wait_for_new_params_event()
- self.log.debug('Gaussian process learner reading costs')
+ #self.log.debug('Gaussian process learner reading costs')
self.get_params_and_costs()
self.fit_gaussian_process()
for _ in range(self.generation_num):
diff --git a/mloop/utilities.py b/mloop/utilities.py
index 01d888f..ab4e3ce 100644
--- a/mloop/utilities.py
+++ b/mloop/utilities.py
@@ -23,6 +23,9 @@
mloop_path = os.path.dirname(mloop.__file__)
+#Set numpy to have no limit on printing to ensure all values are saved
+np.set_printoptions(threshold=np.inf)
+
def config_logger(**kwargs):
'''
Wrapper for _config_logger.
diff --git a/mloop/visualizations.py b/mloop/visualizations.py
index a71231f..be5718d 100644
--- a/mloop/visualizations.py
+++ b/mloop/visualizations.py
@@ -163,7 +163,7 @@ def plot_cost_vs_run(self):
plt.scatter(self.in_numbers,self.in_costs,marker='o',c=self.cost_colors,s=5*mpl.rcParams['lines.markersize'])
plt.xlabel(run_label)
plt.ylabel(cost_label)
- plt.title('Controller: Cost against number.')
+ plt.title('Controller: Cost vs run number.')
artists = []
for ut in self.unique_types:
artists.append(plt.Line2D((0,1),(0,0), color=_color_from_controller_name(ut), marker='o', linestyle=''))
@@ -187,7 +187,7 @@ def plot_parameters_vs_run(self):
plt.ylabel(run_label)
plt.xlabel(run_label)
- plt.title('Controller: Parameters against run number.')
+ plt.title('Controller: Parameters vs run number.')
artists=[]
for ind in range(self.num_params):
artists.append(plt.Line2D((0,1),(0,0), color=self.param_colors[ind],marker='o',linestyle=''))
@@ -217,7 +217,7 @@ def plot_parameters_vs_cost(self):
plt.plot(self.out_params[:,ind],self.in_costs,'o',color=self.param_colors[ind])
plt.xlabel(run_label)
plt.ylabel(cost_label)
- plt.title('Controller: Cost against parameters.')
+ plt.title('Controller: Cost vs parameters.')
artists=[]
for ind in range(self.num_params):
artists.append(plt.Line2D((0,1),(0,0), color=self.param_colors[ind],marker='o',linestyle=''))
@@ -395,7 +395,7 @@ def plot_all_minima_vs_cost(self):
plt.xlabel(scale_param_label)
plt.xlim((0,1))
plt.ylabel(cost_label)
- plt.title('GP Learner: Cost against parameters.')
+ plt.title('GP Learner: Cost vs parameters.')
artists = []
for ind in range(self.num_params):
artists.append(plt.Line2D((0,1),(0,0), color=self.param_colors[ind],marker='o',linestyle=''))
@@ -416,7 +416,7 @@ def plot_hyperparameters_vs_run(self):
plt.plot(self.fit_numbers,self.log_length_scale_history[:,ind],'o',color=self.param_colors[ind])
plt.xlabel(run_label)
plt.ylabel(log_length_scale_label)
- plt.title('GP Learner: Log_10 of lengths scales vs run number.')
+ plt.title('GP Learner: Log of lengths scales vs fit number.')
if scale_num!=1:
artists=[]
for ind in range(self.num_params):
@@ -430,5 +430,5 @@ def plot_hyperparameters_vs_run(self):
plt.plot(self.fit_numbers,self.noise_level_history,'o',color='k')
plt.xlabel(run_label)
plt.ylabel(noise_label)
- plt.title('GP Learner: Noise level vs run number.')
+ plt.title('GP Learner: Noise level vs fit number.')
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 3c47842..acf0ef7 100644
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,7 @@
license = 'MIT',
keywords = 'automated machine learning optimization optimisation science experiment quantum',
url = 'https://github.com/michaelhush/M-LOOP/',
- download_url = 'https://github.com/michaelhush/M-LOOP/tarball/v2.0.1',
+ download_url = 'https://github.com/michaelhush/M-LOOP/tarball/v2.0.2',
classifiers = ['Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Science/Research',
diff --git a/tests/test_examples.py b/tests/test_examples.py
index a3f50f0..f01feed 100644
--- a/tests/test_examples.py
+++ b/tests/test_examples.py
@@ -24,51 +24,50 @@ def tearDownClass(cls):
cls.fake_experiment.end_event.set()
cls.fake_experiment.join()
- def test_complete_controller_config(self):
- controller = mll.launch_from_file(mlu.mloop_path+'/../examples/complete_controller_config.txt',
- num_params=1,
+ def test_controller_config(self):
+ controller = mll.launch_from_file(mlu.mloop_path+'/../examples/controller_config.txt',
**self.override_dict)
self.asserts_for_cost_and_params(controller)
- def test_complete_extras_config(self):
- controller = mll.launch_from_file(mlu.mloop_path+'/../examples/complete_extras_config.txt',
+ def test_extras_config(self):
+ controller = mll.launch_from_file(mlu.mloop_path+'/../examples/extras_config.txt',
num_params=1,
target_cost = 0.1,
**self.override_dict)
self.asserts_for_cost_and_params(controller)
- def test_complete_logging_config(self):
- controller = mll.launch_from_file(mlu.mloop_path+'/../examples/complete_logging_config.txt',
+ def test_logging_config(self):
+ controller = mll.launch_from_file(mlu.mloop_path+'/../examples/logging_config.txt',
num_params=1,
target_cost = 0.1,
**self.override_dict)
self.asserts_for_cost_and_params(controller)
- def test_simple_random_config(self):
- _ = mll.launch_from_file(mlu.mloop_path+'/../examples/simple_random_config.txt',
+ def test_random_simple_config(self):
+ _ = mll.launch_from_file(mlu.mloop_path+'/../examples/random_simple_config.txt',
**self.override_dict)
- def test_complete_random_config(self):
- _ = mll.launch_from_file(mlu.mloop_path+'/../examples/complete_random_config.txt',
+ def test_random_complete_config(self):
+ _ = mll.launch_from_file(mlu.mloop_path+'/../examples/random_complete_config.txt',
**self.override_dict)
- def test_simple_nelder_mead_config(self):
- controller = mll.launch_from_file(mlu.mloop_path+'/../examples/simple_nelder_mead_config.txt',
+ def test_nelder_mead_simple_config(self):
+ controller = mll.launch_from_file(mlu.mloop_path+'/../examples/nelder_mead_simple_config.txt',
**self.override_dict)
self.asserts_for_cost_and_params(controller)
- def test_complete_nelder_mead_config(self):
- controller = mll.launch_from_file(mlu.mloop_path+'/../examples/complete_nelder_mead_config.txt',
+ def test_nelder_mead_complete_config(self):
+ controller = mll.launch_from_file(mlu.mloop_path+'/../examples/nelder_mead_complete_config.txt',
**self.override_dict)
self.asserts_for_cost_and_params(controller)
- def test_simple_gaussian_process_config(self):
- controller = mll.launch_from_file(mlu.mloop_path+'/../examples/simple_gaussian_process_config.txt',
+ def test_gaussian_process_simple_config(self):
+ controller = mll.launch_from_file(mlu.mloop_path+'/../examples/gaussian_process_simple_config.txt',
**self.override_dict)
self.asserts_for_cost_and_params(controller)
- def test_complete_gaussian_process_config(self):
- controller = mll.launch_from_file(mlu.mloop_path+'/../examples/complete_gaussian_process_config.txt',
+ def test_gaussian_process_complete_config(self):
+ controller = mll.launch_from_file(mlu.mloop_path+'/../examples/gaussian_process_complete_config.txt',
**self.override_dict)
self.asserts_for_cost_and_params(controller)