Merge pull request #392 from autonomio/daily-dev
* Fix the required parameters discrepancy

* Fix hidden_layers complexity

* Make @pep8speaks stop crying

* Handles PRs #292 and #379

- Implements a custom error if a non-supported optimizer is used with lr_normalizer #292
- Fixed docs typo #379

* Updated PR template
mikkokotila committed Sep 3, 2019
2 parents b910989 + 61d5237 commit 0871f7e
Showing 8 changed files with 63 additions and 74 deletions.
12 changes: 8 additions & 4 deletions .github/PULL_REQUEST_TEMPLATE.md
@@ -3,12 +3,16 @@
 Thanks so much :) First, please take a moment to carefully check through
 the below items:
 
-- [ ] Changes have gone through actual use testing
+#### Sanity
+
+- [ ] I'm aware of the implications of the proposed changes
 - [ ] [Docs](https://autonomio.github.io/talos) are updated where relevant
 - [ ] Code is [PEP8](https://www.python.org/dev/peps/pep-0008/)
+
+#### Tests
+
+- [ ] Changes have gone through actual use testing
 - [ ] All local tests have passed (run ./test.sh in /talos)
-- [ ] Travis tests have passed
-- [ ] Open a pull request
-- [ ] PR is to daily-dev branch
+- [ ] Tests have been updated to reflect the changes
 
 <hr>
2 changes: 1 addition & 1 deletion setup.py
@@ -16,7 +16,7 @@
 URL = 'http://autonom.io'
 LICENSE = 'MIT'
 DOWNLOAD_URL = 'https://github.com/autonomio/talos/'
-VERSION = '0.6.3'
+VERSION = '0.6.4'
 
 try:
     from setuptools import setup
2 changes: 1 addition & 1 deletion talos/__init__.py
@@ -34,4 +34,4 @@
 del commands, scan, model, metrics, key
 del sub, keep_from_templates, template_sub, warnings
 
-__version__ = "0.6.3"
+__version__ = "0.6.4"
89 changes: 25 additions & 64 deletions talos/model/hidden_layers.py
@@ -1,80 +1,41 @@
-def hidden_layers(model, params, last_neuron):
+from keras.layers import Dense, Dropout
+from .network_shape import network_shape
+from ..utils.exceptions import TalosParamsError
+
+
+def hidden_layers(model, params, last_neuron):
     '''HIDDEN LAYER Generator
-    NOTE: 'first_neuron', 'dropout', and 'hidden_layers' need
+    NOTE: 'shapes', 'first_neuron', 'dropout', and 'hidden_layers' need
     to be present in the params dictionary.
     Hidden layer generation for the cases where number
     of layers is used as a variable in the optimization process.
     Handles things in a way where any number of layers can be tried
     with matching hyperparameters.'''
 
-    from keras.layers import Dense, Dropout
-    from .network_shape import network_shape
-    from ..utils.exceptions import TalosParamsError
-
-    try:
-        kernel_initializer = params['kernel_initializer']
-    except KeyError:
-        kernel_initializer = 'glorot_uniform'
-
-    try:
-        kernel_regularizer = params['kernel_regularizer']
-    except KeyError:
-        kernel_regularizer = None
-
-    try:
-        bias_initializer = params['bias_initializer']
-    except KeyError:
-        bias_initializer = 'zeros'
-
-    try:
-        bias_regularizer = params['bias_regularizer']
-    except KeyError:
-        bias_regularizer = None
-
-    try:
-        use_bias = params['use_bias']
-    except KeyError:
-        use_bias = True
-
-    try:
-        activity_regularizer = params['activity_regularizer']
-    except KeyError:
-        activity_regularizer = None
-
-    try:
-        kernel_constraint = params['kernel_constraint']
-    except KeyError:
-        kernel_constraint = None
-
-    try:
-        bias_constraint = params['bias_constraint']
-    except KeyError:
-        bias_constraint = None
-
     # check for the params that are required for hidden_layers
-    for param in ['shapes', 'first_neuron', 'dropout']:
-        try:
-            params[param]
-        except KeyError as err:
-            if err.args[0] == param:
-                raise TalosParamsError("hidden_layers requires '" + param + "' in params")
+    for param in ['shapes', 'first_neuron', 'dropout', 'hidden_layers']:
+        if param not in params:
+            raise TalosParamsError(
+                "hidden_layers requires '" + param + "' in params")
 
     layer_neurons = network_shape(params, last_neuron)
 
     for i in range(params['hidden_layers']):
 
-        model.add(Dense(layer_neurons[i],
-                        activation=params['activation'],
-                        use_bias=use_bias,
-                        kernel_initializer=kernel_initializer,
-                        kernel_regularizer=kernel_regularizer,
-                        bias_initializer=bias_initializer,
-                        bias_regularizer=bias_regularizer,
-                        activity_regularizer=activity_regularizer,
-                        kernel_constraint=kernel_constraint,
-                        bias_constraint=bias_constraint))
-
+        model.add(Dense(
+            layer_neurons[i],
+            kernel_initializer=params.get(
+                'kernel_initializer',
+                'glorot_uniform'
+            ),
+            kernel_regularizer=params.get('kernel_regularizer'),
+            bias_initializer=params.get('bias_initializer', 'zeros'),
+            bias_regularizer=params.get('bias_regularizer'),
+            use_bias=params.get('use_bias', True),
+            activity_regularizer=params.get('activity_regularizer'),
+            kernel_constraint=params.get('kernel_constraint'),
+            bias_constraint=params.get('bias_constraint'),
+            activation=params.get('activation')
+        ))
         model.add(Dropout(params['dropout']))
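
The refactor above replaces eight separate try/except KeyError blocks with dict.get, which returns a supplied default when the key is absent. A minimal sketch of the pattern (illustrative only, not part of the commit; the params values here are made up):

    # dict.get collapses each removed try/except block into one expression
    params = {'shapes': 'brick', 'first_neuron': 32,
              'dropout': 0.2, 'hidden_layers': 2}

    # old pattern, repeated eight times in the removed code:
    try:
        kernel_initializer = params['kernel_initializer']
    except KeyError:
        kernel_initializer = 'glorot_uniform'

    # new pattern, inlined at the Dense() call site:
    assert params.get('kernel_initializer', 'glorot_uniform') == 'glorot_uniform'
    assert params.get('use_bias', True) is True
    assert params.get('kernel_regularizer') is None  # default of None when omitted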
10 changes: 6 additions & 4 deletions talos/model/normalizers.py
@@ -1,7 +1,3 @@
-from keras.optimizers import SGD, Adam, Adadelta, Adagrad, Adamax, RMSprop
-from keras.optimizers import Nadam
-
-
 def lr_normalizer(lr, optimizer):
     """Assuming a default learning rate 1, rescales the learning rate
     such that learning rates amongst different optimizers are more or less
@@ -15,6 +11,10 @@ def lr_normalizer(lr, optimizer):
         The optimizer. For example, Adagrad, Adam, RMSprop.
     """
 
+    from keras.optimizers import SGD, Adam, Adadelta, Adagrad, Adamax, RMSprop
+    from keras.optimizers import Nadam
+    from talos.utils.exceptions import TalosModelError
+
     if optimizer == Adadelta:
         pass
     elif optimizer == SGD or optimizer == Adagrad:
@@ -23,5 +23,7 @@ def lr_normalizer(lr, optimizer):
         lr /= 1000.0
     elif optimizer == Adamax or optimizer == Nadam:
         lr /= 500.0
+    else:
+        raise TalosModelError(str(optimizer) + " is not supported by lr_normalizer")
 
     return lr
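
A quick sketch of the behavior this hunk adds (illustrative, not part of the commit; assumes talos 0.6.4 with standalone Keras installed). Note that lr_normalizer compares against optimizer classes, so the class itself is passed, not an instance:

    from keras.optimizers import Adam
    from talos.model.normalizers import lr_normalizer
    from talos.utils.exceptions import TalosModelError

    print(lr_normalizer(1, Adam))  # supported class: rescaled (1 -> 0.001)

    try:
        lr_normalizer(1, 'not_an_optimizer')  # anything unrecognized...
    except TalosModelError as err:
        print('caught:', err)  # ...now raises instead of passing through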
1 change: 1 addition & 0 deletions test/commands/__init__.py
@@ -5,3 +5,4 @@
 from .test_analyze import test_analyze
 from .test_random_methods import test_random_methods
 from .test_rest import test_rest
+from .test_lr_normalizer import test_lr_normalizer
20 changes: 20 additions & 0 deletions test/commands/test_lr_normalizer.py
@@ -0,0 +1,20 @@
+def test_lr_normalizer():
+    '''Test learning rate normalizer to confirm an invalid type is
+    recognized and throws TalosModelError.'''
+
+    from talos.model.normalizers import lr_normalizer
+    from talos.utils.exceptions import TalosModelError
+
+    print('Testing lr_normalizer() and invalid optimizer type...')
+
+    # Using string as proxy for any invalid class
+    # (ex., tensorflow-sourced optimizer)
+    bad_optimizer = 'test'
+
+    try:
+        lr_normalizer(1, bad_optimizer)
+    except TalosModelError:
+        print('Invalid model optimizer caught successfully!')
+        pass
+    else:
+        print('Invalid (string) model optimizer type not caught.')
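
One design note: as committed, the test only prints when the error is not raised, so a regression would not fail the run. A stricter variant (hypothetical, not in the commit) could let pytest enforce the failure path directly:

    import pytest

    from talos.model.normalizers import lr_normalizer
    from talos.utils.exceptions import TalosModelError

    def test_lr_normalizer_raises():
        # pytest fails the test unless TalosModelError is raised
        with pytest.raises(TalosModelError):
            lr_normalizer(1, 'test')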
1 change: 1 addition & 0 deletions test_script.py
@@ -11,6 +11,7 @@
 scan_object = test_scan()
 test_analyze(scan_object)
 test_random_methods()
+test_lr_normalizer()
 test_rest(scan_object)
 
 print("\n All tests successfully completed :) Good work. \n ")
