Permalink
Browse files

Fixed one param visualization bug and typos in documentation

When optimizing one parameter, there were some issues reimporting the
saved files for the visualizations to work. This was due to the
problematic corner case of zero D or one D with one element arrays in
numpy. This has now been sanitized. Also fixed some critical typos in
the documentation.
  • Loading branch information...
1 parent 47c16bf commit 3bc037458862118d13703c13271cea3204c7377d @michaelhush committed Mar 29, 2017
Showing with 33 additions and 21 deletions.
  1. +1 −1 docs/interfaces.rst
  2. +1 −1 docs/tutorials.rst
  3. +1 −1 examples/shell_interface_config.txt
  4. +11 −18 mloop/learners.py
  5. +19 −0 mloop/utilities.py
View
@@ -54,7 +54,7 @@ Shell interface
The shell interface is used when experiments can be run from a command in a shell. M-LOOP will still need to be configured and executed in the same manner described for a file interface as described in :ref:`tutorial <sec-standard-experiment>`. The only difference is how M-LOOP starts the experiment and reads data. To use this interface you must include the following options::
- interface='shell'
+ interface_type='shell'
command='./run_exp'
params_args_type='direct'
View
@@ -31,7 +31,7 @@ There are three stages:
M-LOOP
- M-LOOP first looks for the configuration file *exp_input.txt*, which contains options like the number of parameters and their limits, in the folder it is executed, then starts the optimization process.
+ M-LOOP first looks for the configuration file *exp_config.txt*, which contains options like the number of parameters and their limits, in the folder it is executed, then starts the optimization process.
2. M-LOOP controls and optimizes the experiment by exchanging files written to disk. M-LOOP produces a file called *exp_input.txt* which contains a variable params with the next parameters to be run by the experiment. The experiment is expected to run an experiment with these parameters and measure the resultant cost. The experiment should then write the file *exp_output.txt* which contains at least the variable cost which quantifies the performance of that experimental run, and optionally, the variables uncer (for uncertainty) and bad (if the run failed). This process is repeated many times until the halting condition is met.
@@ -3,4 +3,4 @@
interface_type = 'shell' #The type of interface
command = 'python shell_script.py' #The command for the command line to run the experiment to get a cost from the parameters
-params_args_type = 'direct' #The format of the parameters when providing them on the command line. 'direct' simply appends them, e.g. python CLIscript.py 7 2 1, 'named' names each parameter, e.g. python CLIscript.py --param1 7 --param2 2 --param3 1
+params_args_type = 'direct' #The format of the parameters when providing them on the command line. 'direct' simply appends them, e.g. python shell_script.py 7 2 1, 'named' names each parameter, e.g. python shell_script.py --param1 7 --param2 2 --param3 1
View
@@ -919,12 +919,12 @@ def __init__(self,
#Basic optimization settings
num_params = int(self.training_dict['num_params'])
- min_boundary = np.squeeze(np.array(self.training_dict['min_boundary'], dtype=float))
- max_boundary = np.squeeze(np.array(self.training_dict['max_boundary'], dtype=float))
+ min_boundary = mlu.safe_cast_to_array(self.training_dict['min_boundary'])
+ max_boundary = mlu.safe_cast_to_array(self.training_dict['max_boundary'])
#Configuration of the learner
self.cost_has_noise = bool(self.training_dict['cost_has_noise'])
- self.length_scale = np.squeeze(np.array(self.training_dict['length_scale']))
+ self.length_scale = mlu.safe_cast_to_array(self.training_dict['length_scale'])
self.length_scale_history = list(self.training_dict['length_scale_history'])
self.noise_level = float(self.training_dict['noise_level'])
self.noise_level_history = mlu.safe_cast_to_list(self.training_dict['noise_level_history'])
@@ -935,37 +935,30 @@ def __init__(self,
self.params_count = int(self.training_dict['params_count'])
#Data from previous experiment
- self.all_params = np.array(self.training_dict['all_params'], dtype=float)
- self.all_costs = np.squeeze(np.array(self.training_dict['all_costs'], dtype=float))
- self.all_uncers = np.squeeze(np.array(self.training_dict['all_uncers'], dtype=float))
+ self.all_params = np.array(self.training_dict['all_params'])
+ self.all_costs = mlu.safe_cast_to_array(self.training_dict['all_costs'])
+ self.all_uncers = mlu.safe_cast_to_array(self.training_dict['all_uncers'])
self.bad_run_indexs = mlu.safe_cast_to_list(self.training_dict['bad_run_indexs'])
#Derived properties
self.best_cost = float(self.training_dict['best_cost'])
- self.best_params = np.squeeze(np.array(self.training_dict['best_params'], dtype=float))
+ self.best_params = mlu.safe_cast_to_array(self.training_dict['best_params'])
self.best_index = int(self.training_dict['best_index'])
self.worst_cost = float(self.training_dict['worst_cost'])
self.worst_index = int(self.training_dict['worst_index'])
self.cost_range = float(self.training_dict['cost_range'])
try:
- self.predicted_best_parameters = np.squeeze(np.array(self.training_dict['predicted_best_parameters']))
+ self.predicted_best_parameters = mlu.safe_cast_to_array(self.training_dict['predicted_best_parameters'])
self.predicted_best_cost = float(self.training_dict['predicted_best_cost'])
self.predicted_best_uncertainty = float(self.training_dict['predicted_best_uncertainty'])
self.has_global_minima = True
except KeyError:
self.has_global_minima = False
try:
- self.local_minima_parameters = list(self.training_dict['local_minima_parameters'])
-
- if isinstance(self.training_dict['local_minima_costs'], np.ndarray):
- self.local_minima_costs = list(np.squeeze(self.training_dict['local_minima_costs']))
- else:
- self.local_minima_costs = list(self.training_dict['local_minima_costs'])
- if isinstance(self.training_dict['local_minima_uncers'], np.ndarray):
- self.local_minima_uncers = list(np.squeeze(self.training_dict['local_minima_uncers']))
- else:
- self.local_minima_uncers = list(self.training_dict['local_minima_uncers'])
+ self.local_minima_parameters = mlu.safe_cast_to_list(self.training_dict['local_minima_parameters'])
+ self.local_minima_costs = mlu.safe_cast_to_list(self.training_dict['local_minima_costs'])
+ self.local_minima_uncers = mlu.safe_cast_to_list(self.training_dict['local_minima_uncers'])
self.has_local_minima = True
except KeyError:
View
@@ -175,6 +175,25 @@ def check_file_type_supported(file_type):
'''
return file_type == 'mat' or 'txt' or 'pkl'
+def safe_cast_to_array(in_array):
+ '''
+ Attempts to safely cast the input to an array. Takes care of corner cases.
+
+ Args:
+ in_array (array or equivalent): The array (or otherwise) to be converted to an array.
+
+ Returns:
+ array : array that has been squeezed and 0-D cases changed to 1-D cases
+
+ '''
+
+ out_array = np.squeeze(np.array(in_array))
+
+ if out_array.shape == ():
+ out_array = np.array([out_array[()]])
+
+ return out_array
+
def safe_cast_to_list(in_array):
'''
Attempts to safely cast a numpy array to a list, if not a numpy array just casts to list on the object.

0 comments on commit 3bc0374

Please sign in to comment.