diff --git a/bin/M-LOOP b/bin/M-LOOP index 38e3048..3957d38 100644 --- a/bin/M-LOOP +++ b/bin/M-LOOP @@ -18,6 +18,7 @@ import sys import argparse import mloop as ml import mloop.launchers as mll +import multiprocessing as mp def main(argv): @@ -33,6 +34,7 @@ def main(argv): _ = mll.launch_from_file(config_filename) if __name__=="__main__": + mp.freeze_support() main(sys.argv[1:]) diff --git a/examples/gaussian_process_complete_config.txt b/examples/gaussian_process_complete_config.txt index a4bf1eb..2e50dbe 100644 --- a/examples/gaussian_process_complete_config.txt +++ b/examples/gaussian_process_complete_config.txt @@ -8,7 +8,7 @@ target_cost = 0.1 #cost to beat #Gaussian process options controller_type = 'gaussian_process' num_params = 2 #number of parameters -min_boundary = [-10.,-10.] #minimum boundary +min_boundary = [-10.,-10.] #minimum boundary max_boundary = [10.,10.] #maximum boundary length_scale = [1.0] #initial lengths scales for GP cost_has_noise = True #whether cost function has noise @@ -17,10 +17,11 @@ update_hyperparameters = True #whether noise level and lengths scales a trust_region = [5,5] #maximum move distance from best params default_bad_cost = 10 #default cost for bad run default_bad_uncertainty = 1 #default uncertainty for bad run -learner_archive_filename = 'a_word' #filename of gp archive -learner_archive_file_type = 'mat' #file type of archive +learner_archive_filename = 'a_word' #filename of gp archive +learner_archive_file_type = 'mat' #file type of archive predict_global_minima_at_end = True #find predicted global minima at end predict_local_minima_at_end = True #find all local minima of landscape at end +no_delay = True #whether to wait for the GP to make predictions or not. 
Default True (do not wait) #Training source options training_type = 'random' #training type can be random or nelder_mead diff --git a/examples/tutorial_config.txt b/examples/tutorial_config.txt index 79e5da8..112b504 100644 --- a/examples/tutorial_config.txt +++ b/examples/tutorial_config.txt @@ -13,7 +13,7 @@ target_cost = 0.01 #optimization halts when a cost below #Learner specific options first_params = [0.5,0.5] #first parameters to try -trust_region = 0.4 #maximum % move distance from best params +trust_region = 0.4 #maximum % move distance from best params #File format options interface_file_type = 'txt' #file types of *exp_input.mat* and *exp_output.mat* diff --git a/mloop/controllers.py b/mloop/controllers.py index e8f03e0..36410c8 100644 --- a/mloop/controllers.py +++ b/mloop/controllers.py @@ -678,10 +678,13 @@ def _shut_down(self): ''' self.log.debug('GP learner end set.') self.end_gp_learner.set() - self.gp_learner.join(self.gp_learner.learner_wait*3) + self.gp_learner.join() + #self.gp_learner.join(self.gp_learner.learner_wait*3) + ''' if self.gp_learner.is_alive(): - self.log.debug('GP Learner did not join in time had to terminate.') + self.log.warning('GP Learner did not join in time had to terminate.') self.gp_learner.terminate() + ''' self.log.debug('GP learner joined') last_dict = None while not self.gp_learner_params_queue.empty(): diff --git a/mloop/learners.py b/mloop/learners.py index 1657581..08673bb 100644 --- a/mloop/learners.py +++ b/mloop/learners.py @@ -296,7 +296,6 @@ def run(self): ''' Puts the next parameters on the queue which are randomly picked from a uniform distribution between the minimum and maximum boundaries when a cost is added to the cost queue. 
''' - self.log.debug('Starting Random Learner') if self.first_params is None: next_params = self.min_boundary + nr.rand(self.num_params) * self.diff_boundary diff --git a/setup.cfg b/setup.cfg index 1dd2a0f..7f9083a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -2,4 +2,4 @@ description-file = README.rst [aliases] -test=pytest \ No newline at end of file +test=pytest diff --git a/setup.py b/setup.py index 1ca862d..b931326 100644 --- a/setup.py +++ b/setup.py @@ -3,44 +3,51 @@ ''' from __future__ import absolute_import, division, print_function +import multiprocessing as mp import mloop as ml from setuptools import setup, find_packages -setup( - name = 'M-LOOP', - version = ml.__version__, - packages = find_packages(), - scripts = ['./bin/M-LOOP'], - - setup_requires=['pytest-runner'], - install_requires = ['docutils>=0.3'], - tests_require=['pytest'], - package_data = { - # If any package contains *.txt or *.rst files, include them: - '': ['*.txt','*.md'], - }, - author = 'Michael R Hush', - author_email = 'MichaelRHush@gmail.com', - description = 'M-LOOP: Machine-learning online optimization package. 
A python package of automated optimization tools - enhanced with machine-learning - for quantum scientific experiments, computer controlled systems or other optimization tasks.', - license = 'MIT', - keywords = 'automated machine learning optimization optimisation science experiment quantum', - url = 'https://github.com/michaelhush/M-LOOP/', - download_url = 'https://github.com/michaelhush/M-LOOP/tarball/v2.0.2', +def main(): + setup( + name = 'M-LOOP', + version = ml.__version__, + packages = find_packages(), + scripts = ['./bin/M-LOOP'], + + setup_requires=['pytest-runner'], + install_requires = ['docutils>=0.3','numpy>=1.11','scipy>=0.17','matplotlib>=1.5','pytest>=2.9'], + tests_require=['pytest','setuptools>=26'], + + package_data = { + # If any package contains *.txt or *.rst files, include them: + '': ['*.txt','*.md'], + }, + author = 'Michael R Hush', + author_email = 'MichaelRHush@gmail.com', + description = 'M-LOOP: Machine-learning online optimization package. A python package of automated optimization tools - enhanced with machine-learning - for quantum scientific experiments, computer controlled systems or other optimization tasks.', + license = 'MIT', + keywords = 'automated machine learning optimization optimisation science experiment quantum', + url = 'https://github.com/michaelhush/M-LOOP/', + download_url = 'https://github.com/michaelhush/M-LOOP/tarball/v2.0.2', + + classifiers = ['Development Status :: 2 - Pre-Alpha', + 'Intended Audience :: Science/Research', + 'Intended Audience :: Manufacturing', + 'License :: OSI Approved :: MIT License', + 'Natural Language :: English', + 'Operating System :: MacOS :: MacOS X', + 'Operating System :: POSIX :: Linux', + 'Operating System :: Microsoft :: Windows', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: Implementation :: CPython', + 'Topic :: Scientific/Engineering', + 'Topic :: 
Scientific/Engineering :: Artificial Intelligence', + 'Topic :: Scientific/Engineering :: Physics'] + ) - classifiers = ['Development Status :: 2 - Pre-Alpha', - 'Intended Audience :: Science/Research', - 'Intended Audience :: Manufacturing', - 'License :: OSI Approved :: MIT License', - 'Natural Language :: English', - 'Operating System :: MacOS :: MacOS X', - 'Operating System :: POSIX :: Linux', - 'Operating System :: Microsoft :: Windows', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3.4', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: Implementation :: CPython', - 'Topic :: Scientific/Engineering', - 'Topic :: Scientific/Engineering :: Artificial Intelligence', - 'Topic :: Scientific/Engineering :: Physics'] -) \ No newline at end of file +if __name__=='__main__': + mp.freeze_support() + main() \ No newline at end of file diff --git a/tests/test_examples.py b/tests/test_examples.py index 7d0cbce..2eef6fb 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -10,13 +10,14 @@ import mloop.utilities as mlu import logging import numpy as np +import multiprocessing class TestExamples(unittest.TestCase): @classmethod def setUpClass(cls): os.chdir(mlu.mloop_path + '/../tests') - cls.override_dict = {'file_log_level':logging.DEBUG,'console_log_level':logging.WARNING,'visualizations':False} + cls.override_dict = {'file_log_level':logging.DEBUG,'console_log_level':logging.DEBUG,'visualizations':False} cls.fake_experiment = mlt.FakeExperiment() cls.fake_experiment.start() @@ -56,7 +57,7 @@ def test_nelder_mead_simple_config(self): controller = mll.launch_from_file(mlu.mloop_path+'/../examples/nelder_mead_simple_config.txt', **self.override_dict) self.asserts_for_cost_and_params(controller) - + def test_nelder_mead_complete_config(self): controller = mll.launch_from_file(mlu.mloop_path+'/../examples/nelder_mead_complete_config.txt', **self.override_dict) @@ -66,7 +67,7 @@ def 
test_gaussian_process_simple_config(self): controller = mll.launch_from_file(mlu.mloop_path+'/../examples/gaussian_process_simple_config.txt', **self.override_dict) self.asserts_for_cost_and_params(controller) - + def test_gaussian_process_complete_config(self): controller = mll.launch_from_file(mlu.mloop_path+'/../examples/gaussian_process_complete_config.txt', **self.override_dict) @@ -80,6 +81,8 @@ def test_tutorial_config(self): def asserts_for_cost_and_params(self,controller): self.assertTrue(controller.best_cost<=controller.target_cost) self.assertTrue(np.sum(np.square(controller.best_params))<=controller.target_cost) - + + if __name__ == "__main__": + multiprocessing.freeze_support() unittest.main()