diff --git a/docs/interfaces.rst b/docs/interfaces.rst index 497348e..926fb9d 100644 --- a/docs/interfaces.rst +++ b/docs/interfaces.rst @@ -54,7 +54,7 @@ Shell interface The shell interface is used when experiments can be run from a command in a shell. M-LOOP will still need to be configured and executed in the same manner described for a file interface as describe in :ref:`tutorial `. The only difference is how M-LOOP starts the experiment and reads data. To use this interface you must include the following options:: - interface='shell' + interface_type='shell' command='./run_exp' params_args_type='direct' diff --git a/docs/tutorials.rst b/docs/tutorials.rst index 4fdefb8..a7d0dd9 100644 --- a/docs/tutorials.rst +++ b/docs/tutorials.rst @@ -31,7 +31,7 @@ There are three stages: M-LOOP - M-LOOP first looks for the configuration file *exp_input.txt*, which contains options like the number of parameters and their limits, in the folder it is executed, then starts the optimization process. + M-LOOP first looks for the configuration file *exp_config.txt*, which contains options like the number of parameters and their limits, in the folder it is executed, then starts the optimization process. 2. M-LOOP controls and optimizes the experiment by exchanging files written to disk. M-LOOP produces a file called *exp_input.txt* which contains a variable params with the next parameters to be run by the experiment. The experiment is expected to run an experiment with these parameters and measure the resultant cost. The experiment should then write the file *exp_output.txt* which contains at least the variable cost which quantifies the performance of that experimental run, and optionally, the variables uncer (for uncertainty) and bad (if the run failed). This process is repeated many times until the halting condition is met. 
diff --git a/examples/shell_interface_config.txt b/examples/shell_interface_config.txt index e988077..7fa786e 100644 --- a/examples/shell_interface_config.txt +++ b/examples/shell_interface_config.txt @@ -3,4 +3,4 @@ interface_type = 'shell' #The type of interface command = 'python shell_script.py' #The command for the command line to run the experiment to get a cost from the parameters -params_args_type = 'direct' #The format of the parameters when providing them on the command line. 'direct' simply appends them, e.g. python CLIscript.py 7 2 1, 'named' names each parameter, e.g. python CLIscript.py --param1 7 --param2 2 --param3 1 \ No newline at end of file +params_args_type = 'direct' #The format of the parameters when providing them on the command line. 'direct' simply appends them, e.g. python shell_script.py 7 2 1, 'named' names each parameter, e.g. python shell_script.py --param1 7 --param2 2 --param3 1 \ No newline at end of file diff --git a/mloop/controllers.py b/mloop/controllers.py index 691e51b..d018367 100644 --- a/mloop/controllers.py +++ b/mloop/controllers.py @@ -675,7 +675,7 @@ def _optimization_routine(self): ''' Overrides _optimization_routine. Uses the parent routine for the training runs. Implements a customized _optimization_rountine when running the Gaussian Process learner. ''' - #Run the training runs using the standard optimization routine. Adjust the number of max_runs + #Run the training runs using the standard optimization routine. 
self.log.debug('Starting training optimization.') self.log.info('Run:' + str(self.num_in_costs +1)) next_params = self._first_params() diff --git a/mloop/learners.py b/mloop/learners.py index 9f7cea7..b4e8b76 100644 --- a/mloop/learners.py +++ b/mloop/learners.py @@ -919,12 +919,12 @@ def __init__(self, #Basic optimization settings num_params = int(self.training_dict['num_params']) - min_boundary = np.squeeze(np.array(self.training_dict['min_boundary'], dtype=float)) - max_boundary = np.squeeze(np.array(self.training_dict['max_boundary'], dtype=float)) + min_boundary = mlu.safe_cast_to_array(self.training_dict['min_boundary']) + max_boundary = mlu.safe_cast_to_array(self.training_dict['max_boundary']) #Configuration of the learner self.cost_has_noise = bool(self.training_dict['cost_has_noise']) - self.length_scale = np.squeeze(np.array(self.training_dict['length_scale'])) + self.length_scale = mlu.safe_cast_to_array(self.training_dict['length_scale']) self.length_scale_history = list(self.training_dict['length_scale_history']) self.noise_level = float(self.training_dict['noise_level']) self.noise_level_history = mlu.safe_cast_to_list(self.training_dict['noise_level_history']) @@ -935,44 +935,39 @@ def __init__(self, self.params_count = int(self.training_dict['params_count']) #Data from previous experiment - self.all_params = np.array(self.training_dict['all_params'], dtype=float) - self.all_costs = np.squeeze(np.array(self.training_dict['all_costs'], dtype=float)) - self.all_uncers = np.squeeze(np.array(self.training_dict['all_uncers'], dtype=float)) + self.all_params = np.array(self.training_dict['all_params']) + self.all_costs = mlu.safe_cast_to_array(self.training_dict['all_costs']) + self.all_uncers = mlu.safe_cast_to_array(self.training_dict['all_uncers']) self.bad_run_indexs = mlu.safe_cast_to_list(self.training_dict['bad_run_indexs']) #Derived properties self.best_cost = float(self.training_dict['best_cost']) - self.best_params = 
np.squeeze(np.array(self.training_dict['best_params'], dtype=float)) + self.best_params = mlu.safe_cast_to_array(self.training_dict['best_params']) self.best_index = int(self.training_dict['best_index']) self.worst_cost = float(self.training_dict['worst_cost']) self.worst_index = int(self.training_dict['worst_index']) self.cost_range = float(self.training_dict['cost_range']) try: - self.predicted_best_parameters = np.squeeze(np.array(self.training_dict['predicted_best_parameters'])) + self.predicted_best_parameters = mlu.safe_cast_to_array(self.training_dict['predicted_best_parameters']) self.predicted_best_cost = float(self.training_dict['predicted_best_cost']) self.predicted_best_uncertainty = float(self.training_dict['predicted_best_uncertainty']) self.has_global_minima = True except KeyError: self.has_global_minima = False try: - self.local_minima_parameters = list(self.training_dict['local_minima_parameters']) - - if isinstance(self.training_dict['local_minima_costs'], np.ndarray): - self.local_minima_costs = list(np.squeeze(self.training_dict['local_minima_costs'])) - else: - self.local_minima_costs = list(self.training_dict['local_minima_costs']) - if isinstance(self.training_dict['local_minima_uncers'], np.ndarray): - self.local_minima_uncers = list(np.squeeze(self.training_dict['local_minima_uncers'])) - else: - self.local_minima_uncers = list(self.training_dict['local_minima_uncers']) + self.local_minima_parameters = mlu.safe_cast_to_list(self.training_dict['local_minima_parameters']) + self.local_minima_costs = mlu.safe_cast_to_list(self.training_dict['local_minima_costs']) + self.local_minima_uncers = mlu.safe_cast_to_list(self.training_dict['local_minima_uncers']) self.has_local_minima = True except KeyError: self.has_local_minima = False - - super(GaussianProcessLearner,self).__init__(num_params=num_params, + if 'num_params' in kwargs: + super(GaussianProcessLearner,self).__init__(**kwargs) + else: + 
super(GaussianProcessLearner,self).__init__(num_params=num_params, min_boundary=min_boundary, max_boundary=max_boundary, **kwargs) diff --git a/mloop/utilities.py b/mloop/utilities.py index 1d5a1cf..2ec4b26 100644 --- a/mloop/utilities.py +++ b/mloop/utilities.py @@ -57,6 +57,8 @@ def _config_logger(log_filename = default_log_filename, file_log_level (Optional[int]) : Level of log output for file, default is logging.DEBUG = 10 console_log_level (Optional[int]) :Level of log output for console, defalut is logging.INFO = 20 + Returns: + dictionary: Dict with extra keywords not used by the logging configuration. ''' if not os.path.exists(log_foldername): os.makedirs(log_foldername) @@ -173,6 +175,25 @@ def check_file_type_supported(file_type): ''' return file_type == 'mat' or 'txt' or 'pkl' +def safe_cast_to_array(in_array): + ''' + Attempts to safely cast the input to an array. Takes care of border cases + + Args: + in_array (array or equivalent): The array (or otherwise) to be converted to an array. + + Returns: + array : array that has been squeezed and 0-D cases changed to 1-D cases + + ''' + + out_array = np.squeeze(np.array(in_array)) + + if out_array.shape == (): + out_array = np.array([out_array[()]]) + + return out_array + def safe_cast_to_list(in_array): ''' Attempts to safely cast a numpy array to a list, if not a numpy array just casts to list on the object. diff --git a/mloop/visualizations.py b/mloop/visualizations.py index 9f47743..763b649 100644 --- a/mloop/visualizations.py +++ b/mloop/visualizations.py @@ -340,7 +340,7 @@ def plot_params_vs_generations(self): def create_gaussian_process_learner_visualizations(filename, file_type='pkl', plot_cross_sections=True, - plot_all_minima_vs_cost=False, + plot_all_minima_vs_cost=False, plot_hyperparameters_vs_run=True): ''' Runs the plots from a gaussian process learner file. 
@@ -351,7 +351,7 @@ def create_gaussian_process_learner_visualizations(filename, Keyword Args: file_type (Optional [string]): File type 'pkl' pickle, 'mat' matlab or 'txt' text. plot_cross_sections (Optional [bool]): If True plot predict landscape cross sections, else do not. Default True. - plot_all_minima_vs_cost (Optional [bool]): If True plot all minima parameters versus cost number, False does not. If None it will only make the plots if all minima were previously calculated. Default None. + plot_all_minima_vs_cost (Optional [bool]): If True plot all minima parameters versus cost number, False does not. If None it will only make the plots if all minima were previously calculated. Default False. ''' visualization = GaussianProcessVisualizer(filename, file_type=file_type) if plot_cross_sections: @@ -486,8 +486,7 @@ def plot_all_minima_vs_cost(self): ''' Produce figure of the all the local minima versus cost. ''' - if not self.has_all_minima: - self.find_all_minima() + self.find_all_minima() global figure_counter, legend_loc figure_counter += 1 plt.figure(figure_counter) diff --git a/tests/test_units.py b/tests/test_units.py index a45e831..905143a 100644 --- a/tests/test_units.py +++ b/tests/test_units.py @@ -32,7 +32,7 @@ def test_max_num_runs(self): controller = mlc.create_controller(interface, max_num_runs = 5, target_cost = -1, - max_num_runs_without_better_params = 2) + max_num_runs_without_better_params = 10) controller.optimize() self.assertTrue(controller.best_cost == 1.) self.assertTrue(np.array_equiv(np.array(controller.in_costs),