From 28ac93c937e2646b3ec925b21e882bbb5562231b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= Date: Sun, 30 Jun 2019 13:23:23 +0000 Subject: [PATCH 1/5] Fix the required parameters discrepancy --- talos/model/layers.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/talos/model/layers.py b/talos/model/layers.py index 6577ce45..d01307b4 100644 --- a/talos/model/layers.py +++ b/talos/model/layers.py @@ -4,10 +4,9 @@ def hidden_layers(model, params, last_neuron): - '''HIDDEN LAYER Generator - NOTE: 'first_neuron', 'dropout', and 'hidden_layers' need + NOTE: 'shapes', 'first_neuron', 'dropout', and 'hidden_layers' need to be present in the params dictionary. Hidden layer generation for the cases where number @@ -56,7 +55,7 @@ def hidden_layers(model, params, last_neuron): bias_constraint = None # check for the params that are required for hidden_layers - for param in ['shapes', 'first_neuron', 'dropout']: + for param in ['shapes', 'first_neuron', 'dropout', 'hidden_layers']: try: params[param] except KeyError as err: From 7b8663d8eeb13babbca1417fcd5b9db1fd945814 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= Date: Sun, 30 Jun 2019 13:41:31 +0000 Subject: [PATCH 2/5] Fix hidden_layers complexity --- talos/model/layers.py | 73 ++++++++++--------------------------------- 1 file changed, 16 insertions(+), 57 deletions(-) diff --git a/talos/model/layers.py b/talos/model/layers.py index d01307b4..1f6d3210 100644 --- a/talos/model/layers.py +++ b/talos/model/layers.py @@ -14,67 +14,26 @@ def hidden_layers(model, params, last_neuron): Handles things in a way where any number of layers can be tried with matching hyperparameters.''' - try: - kernel_initializer = params['kernel_initializer'] - except KeyError: - kernel_initializer = 'glorot_uniform' - - try: - kernel_regularizer = params['kernel_regularizer'] - except KeyError: - kernel_regularizer = None - - try: - bias_initializer = params['bias_initializer'] - except 
KeyError: - bias_initializer = 'zeros' - - try: - bias_regularizer = params['bias_regularizer'] - except KeyError: - bias_regularizer = None - - try: - use_bias = params['use_bias'] - except KeyError: - use_bias = True - - try: - activity_regularizer = params['activity_regularizer'] - except KeyError: - activity_regularizer = None - - try: - kernel_constraint = params['kernel_constraint'] - except KeyError: - kernel_constraint = None - - try: - bias_constraint = params['bias_constraint'] - except KeyError: - bias_constraint = None - # check for the params that are required for hidden_layers for param in ['shapes', 'first_neuron', 'dropout', 'hidden_layers']: - try: - params[param] - except KeyError as err: - if err.args[0] == param: - raise TalosParamsError("hidden_layers requires '" + param + "' in params") + if param not in params: + raise TalosParamsError( + "hidden_layers requires '" + param + "' in params" + ) layer_neurons = network_shape(params, last_neuron) for i in range(params['hidden_layers']): - - model.add(Dense(layer_neurons[i], - activation=params['activation'], - use_bias=use_bias, - kernel_initializer=kernel_initializer, - kernel_regularizer=kernel_regularizer, - bias_initializer=bias_initializer, - bias_regularizer=bias_regularizer, - activity_regularizer=activity_regularizer, - kernel_constraint=kernel_constraint, - bias_constraint=bias_constraint)) - + model.add(Dense( + layer_neurons[i], + kernel_initializer=params.get('kernel_initializer', 'glorot_uniform'), + kernel_regularizer=params.get('kernel_regularizer'), + bias_initializer=params.get('bias_initializer', 'zeros'), + bias_regularizer=params.get('bias_regularizer'), + use_bias=params.get('use_bias', True), + activity_regularizer=params.get('activity_regularizer'), + kernel_constraint=params.get('kernel_constraint'), + bias_constraint=params.get('bias_constraint'), + activation=params.get('activation') + )) model.add(Dropout(params['dropout'])) From 59c802c38e4fa918bef55ea65d960645910a740d 
Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= Date: Sun, 30 Jun 2019 13:48:02 +0000 Subject: [PATCH 3/5] Make @pep8speaks stop crying --- talos/model/layers.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/talos/model/layers.py b/talos/model/layers.py index 1f6d3210..bec89cea 100644 --- a/talos/model/layers.py +++ b/talos/model/layers.py @@ -26,7 +26,10 @@ def hidden_layers(model, params, last_neuron): for i in range(params['hidden_layers']): model.add(Dense( layer_neurons[i], - kernel_initializer=params.get('kernel_initializer', 'glorot_uniform'), + kernel_initializer=params.get( + 'kernel_initializer', + 'glorot_uniform' + ), kernel_regularizer=params.get('kernel_regularizer'), bias_initializer=params.get('bias_initializer', 'zeros'), bias_regularizer=params.get('bias_regularizer'), From d5c826b3c327648a908c0060c9fd5d3b30688622 Mon Sep 17 00:00:00 2001 From: Mikko Kotila Date: Tue, 27 Aug 2019 11:06:18 +0300 Subject: [PATCH 4/5] Handles PRs #292 and #379 - Implements a custom error if a non-supported optimizer is used with lr_normalizer #292 - Fixed docs typo #379 --- docs/Analyze.md | 2 +- setup.py | 2 +- talos/__init__.py | 2 +- talos/model/hidden_layers.py | 3 +-- talos/model/normalizers.py | 10 ++++++---- test/commands/__init__.py | 1 + test/commands/test_lr_normalizer.py | 20 ++++++++++++++++++++ test_script.py | 1 + 8 files changed, 32 insertions(+), 9 deletions(-) create mode 100644 test/commands/test_lr_normalizer.py diff --git a/docs/Analyze.md b/docs/Analyze.md index 6374a277..4197479a 100644 --- a/docs/Analyze.md +++ b/docs/Analyze.md @@ -54,7 +54,7 @@ See docstrings for each function for a more detailed description. 
**`plot_bars`** A bar chart that allows up to 4 axis of data to be shown at once -**`plot_bars`** Kernel Destiny Estimation type histogram with support for 1 or 2 axis of data +**`plot_kde`** Kernel Destiny Estimation type histogram with support for 1 or 2 axis of data **`table`** A sortable dataframe with a given metric and hyperparameters diff --git a/setup.py b/setup.py index 79d2bc71..ce051120 100755 --- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ URL = 'http://autonom.io' LICENSE = 'MIT' DOWNLOAD_URL = 'https://github.com/autonomio/talos/' -VERSION = '0.6.3' +VERSION = '0.6.4' try: from setuptools import setup diff --git a/talos/__init__.py b/talos/__init__.py index c44bcaf8..fcb93e2d 100755 --- a/talos/__init__.py +++ b/talos/__init__.py @@ -34,4 +34,4 @@ del commands, scan, model, metrics, key del sub, keep_from_templates, template_sub, warnings -__version__ = "0.6.3" +__version__ = "0.6.4" diff --git a/talos/model/hidden_layers.py b/talos/model/hidden_layers.py index bec89cea..3d5f94ca 100644 --- a/talos/model/hidden_layers.py +++ b/talos/model/hidden_layers.py @@ -18,8 +18,7 @@ def hidden_layers(model, params, last_neuron): for param in ['shapes', 'first_neuron', 'dropout', 'hidden_layers']: if param not in params: raise TalosParamsError( - "hidden_layers requires '" + param + "' in params" - ) + "hidden_layers requires '" + param + "' in params") layer_neurons = network_shape(params, last_neuron) diff --git a/talos/model/normalizers.py b/talos/model/normalizers.py index a4defac8..5ac45e6c 100644 --- a/talos/model/normalizers.py +++ b/talos/model/normalizers.py @@ -1,7 +1,3 @@ -from keras.optimizers import SGD, Adam, Adadelta, Adagrad, Adamax, RMSprop -from keras.optimizers import Nadam - - def lr_normalizer(lr, optimizer): """Assuming a default learning rate 1, rescales the learning rate such that learning rates amongst different optimizers are more or less @@ -15,6 +11,10 @@ def lr_normalizer(lr, optimizer): The optimizer. 
For example, Adagrad, Adam, RMSprop. """ + from keras.optimizers import SGD, Adam, Adadelta, Adagrad, Adamax, RMSprop + from keras.optimizers import Nadam + from talos.utils.exceptions import TalosModelError + if optimizer == Adadelta: pass elif optimizer == SGD or optimizer == Adagrad: @@ -23,5 +23,7 @@ def lr_normalizer(lr, optimizer): lr /= 1000.0 elif optimizer == Adamax or optimizer == Nadam: lr /= 500.0 + else: + raise TalosModelError(str(optimizer) + " is not supported by lr_normalizer") return lr diff --git a/test/commands/__init__.py b/test/commands/__init__.py index f1c3938e..1e871555 100644 --- a/test/commands/__init__.py +++ b/test/commands/__init__.py @@ -5,3 +5,4 @@ from .test_analyze import test_analyze from .test_random_methods import test_random_methods from .test_rest import test_rest +from .test_lr_normalizer import test_lr_normalizer diff --git a/test/commands/test_lr_normalizer.py b/test/commands/test_lr_normalizer.py new file mode 100644 index 00000000..38baabc9 --- /dev/null +++ b/test/commands/test_lr_normalizer.py @@ -0,0 +1,20 @@ +def test_lr_normalizer(): + '''Test learning rate normalizer to confirm an invalid type is + recognized and throws TalosModelError.''' + + from talos.model.normalizers import lr_normalizer + from talos.utils.exceptions import TalosModelError + + print('Testing lr_normalizer() and invalid optimizer type...') + + # Using string as proxy for any invalid class + # (ex., tensorflow-sourced optimizer) + bad_optimizer = 'test' + + try: + lr_normalizer(1, bad_optimizer) + except TalosModelError: + print('Invalid model optimizer caught successfully!') + pass + else: + print('Invalid (string) model optimizer type not caught.') diff --git a/test_script.py b/test_script.py index 801f9f0a..0447524c 100644 --- a/test_script.py +++ b/test_script.py @@ -11,6 +11,7 @@ scan_object = test_scan() test_analyze(scan_object) test_random_methods() + test_lr_normalizer() test_rest(scan_object) print("\n All tests successfully completed 
:) Good work. \n ") From 61d5237e9a490be0da9c261eae65338f6445977f Mon Sep 17 00:00:00 2001 From: Mikko Kotila Date: Tue, 3 Sep 2019 14:48:16 +0300 Subject: [PATCH 5/5] Updated PR template --- .github/PULL_REQUEST_TEMPLATE.md | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index ffbf40f5..f01c88bd 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -3,12 +3,16 @@ Thanks so much :) First, please take a moment to carefully check through the below items: -- [ ] Changes have gone through actual use testing +#### Sanity + +- [ ] I'm aware of the implications of the proposed changes - [ ] [Docs](https://autonomio.github.io/talos) are updated where relevant - [ ] Code is [PEP8](https://www.python.org/dev/peps/pep-0008/) + +#### Tests + +- [ ] Changes have gone through actual use testing - [ ] All local tests have passed (run ./test.sh in /talos) -- [ ] Travis tests have passed -- [ ] Open a pull request -- [ ] PR is to daily-dev branch +- [ ] Tests have been updated to reflect the changes