elu
pchavanne committed Jan 27, 2017
1 parent 8c82c8d commit 9f717bd
Showing 6 changed files with 254 additions and 126 deletions.
8 changes: 6 additions & 2 deletions docs/modules/activations.rst
@@ -7,21 +7,25 @@ Activation

.. autosummary::

get_activation
linear
sigmoid
ultra_fast_sigmoid
tanh
softmax
softplus
relu
linear
elu

Detailed description
--------------------

.. autofunction:: get_activation
.. autofunction:: linear
.. autofunction:: sigmoid
.. autofunction:: ultra_fast_sigmoid
.. autofunction:: tanh
.. autofunction:: softmax
.. autofunction:: softplus
.. autofunction:: relu
.. autofunction:: linear
.. autofunction:: elu
6 changes: 6 additions & 0 deletions docs/modules/layers.rst
@@ -15,6 +15,7 @@ by any new layer.
InputLayer
ReshapeLayer
FlattenLayer
Activation
DenseLayer
UnsupervisedLayer
LogisticRegression
@@ -28,6 +29,7 @@ by any new layer.
BatchNormalization
RNN
LSTM
GRU

.. inheritance-diagram:: yadll.layers

@@ -44,6 +46,8 @@ Detailed description
:members:
.. autoclass:: DenseLayer
:members:
.. autoclass:: Activation
:members:
.. autoclass:: UnsupervisedLayer
:members:
.. autoclass:: LogisticRegression
@@ -68,3 +72,5 @@ Detailed description
:members:
.. autoclass:: LSTM
:members:
.. autoclass:: GRU
:members:
4 changes: 2 additions & 2 deletions examples/normalization_example.py
@@ -93,13 +93,13 @@
l1=hp.l1_reg, l2=hp.l2_reg, activation=yadll.activations.relu,
name='Hidden layer 1')
# Batch Normalization 2
l_bn2 = yadll.layers.BatchNormalization(incoming=l_hid1, name='Batch Normalization 1')
l_bn2 = yadll.layers.BatchNormalization(incoming=l_hid1, name='Batch Normalization 2')
# Dense Layer 2
l_hid2 = yadll.layers.DenseLayer(incoming=l_bn2, nb_units=500, W=yadll.init.glorot_uniform,
l1=hp.l1_reg, l2=hp.l2_reg, activation=yadll.activations.relu,
name='Hidden layer 2')
# Batch Normalization 3
l_bn3 = yadll.layers.BatchNormalization(incoming=l_hid2, name='Batch Normalization 1')
l_bn3 = yadll.layers.BatchNormalization(incoming=l_hid2, name='Batch Normalization 3')
# Logistic regression Layer
l_out = yadll.layers.LogisticRegression(incoming=l_bn3, nb_class=10, l1=hp.l1_reg,
l2=hp.l2_reg, name='Logistic regression')
42 changes: 37 additions & 5 deletions tests/test_activation.py
@@ -11,6 +11,29 @@
dtype=yadll.utils.floatX)


def test_get_activation():
x = T.matrix('x')
activation = yadll.activations.get_activation(yadll.activations.relu)
f = theano.function([x], activation(x))
actual = f(x_val)
desired = x_val * (x_val > 0)
assert_allclose(actual, desired, rtol=1e-5)
x = T.matrix('x')
alpha = 0.5
activation = yadll.activations.get_activation((yadll.activations.relu, {'alpha': alpha}))
f = theano.function([x], activation(x))
actual = f(x_val)
desired = x_val * (x_val > 0) + alpha * x_val * (x_val < 0)
assert_allclose(actual, desired, rtol=1e-5)


def test_linear():
x = [0, -1, 1, 3.2, 1e-7, np.inf, True, None, 'foo']
actual = yadll.activations.linear(x)
desired = x
assert actual == desired


def test_sigmoid():
x = T.matrix('x')
f = theano.function([x], yadll.activations.sigmoid(x))
@@ -65,9 +88,18 @@ def test_relu():
assert_allclose(actual, desired, rtol=1e-5)


def test_linear():
x = [0, -1, 1, 3.2, 1e-7, np.inf, True, None, 'foo']
actual = yadll.activations.linear(x)
desired = x
assert actual == desired
def test_elu():
x = T.matrix('x')
f = theano.function([x], yadll.activations.elu(x))
actual = f(x_val)
desired = x_val * (x_val > 0) + (np.exp(x_val) - 1) * (x_val < 0)
assert_allclose(actual, desired, rtol=1e-5)
x = T.matrix('x')
alpha = 0.5
f = theano.function([x], yadll.activations.elu(x, alpha))
actual = f(x_val)
desired = x_val * (x_val > 0) + alpha * (np.exp(x_val) - 1) * (x_val < 0)
assert_allclose(actual, desired, rtol=1e-5)



56 changes: 50 additions & 6 deletions yadll/activations.py
@@ -6,6 +6,44 @@
import theano.tensor as T


def get_activation(activator):
"""
Return an activation function from an activator object
Parameters
----------
activator : `activator`
an activator is either an activation function or a tuple of (activation function, dict of kwargs)
example : activator = tanh or activator = (elu, {'alpha': 0.5})
Returns
-------
an activation function with its keyword arguments bound
"""
if not isinstance(activator, tuple):
return activator
else:
return lambda x: activator[0](x, **activator[1])
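As a usage note (not part of the committed file): a minimal sketch of how both activator forms resolve through get_activation, mirroring the new test_get_activation test; the sample tensor and values are illustrative.

import numpy as np
import theano
import theano.tensor as T
import yadll

x = T.matrix('x')
x_val = np.asarray([[-2.0, -0.5, 0.0, 0.5, 2.0]], dtype=theano.config.floatX)

# Plain function form: get_activation returns the function unchanged
act = yadll.activations.get_activation(yadll.activations.tanh)
print(theano.function([x], act(x))(x_val))

# (function, dict of kwargs) form: the kwargs are bound into a wrapper
act = yadll.activations.get_activation((yadll.activations.relu, {'alpha': 0.1}))
print(theano.function([x], act(x))(x_val))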


def linear(x):
"""Linear activation function
:math:`\\varphi(x) = x`
Parameters
----------
x : symbolic tensor
Tensor to compute the activation function for.
Returns
-------
symbolic tensor
The output of the identity applied to the activation `x`.
"""
return x


def sigmoid(x):
"""Sigmoid function
:math:`\\varphi(x) = \\frac{1}{1 + e^{-x}}`
@@ -139,20 +177,26 @@ def relu(x, alpha=0):
return T.nnet.relu(x, alpha)


def linear(x):
"""Linear activation function
:math:`\\varphi(x) = x`
def elu(x, alpha=1):
"""
Compute the element-wise exponential linear activation function.
Parameters
----------
x : symbolic tensor
Tensor to compute the activation function for.
alpha : scalar
scale for the negative part (default 1)
Returns
-------
symbolic tensor
The output of the identity applied to the activation `x`.
Element-wise exponential linear activation function applied to `x`.
References
----------
.. [1] Djork-Arne Clevert, Thomas Unterthiner, Sepp Hochreiter,
"Fast and Accurate Deep Network Learning by
Exponential Linear Units (ELUs)", http://arxiv.org/abs/1511.07289
"""
return x

return T.nnet.elu(x, alpha)
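For completeness, a small illustrative check (not part of the commit) compiling the new elu activation with Theano and comparing it to the closed-form definition that test_elu uses as its oracle; the array values are arbitrary examples.

import numpy as np
import theano
import theano.tensor as T
import yadll

x = T.matrix('x')
x_val = np.asarray([[-2.0, -0.5, 0.0, 0.5, 2.0]], dtype=theano.config.floatX)

alpha = 0.5
f = theano.function([x], yadll.activations.elu(x, alpha))
actual = f(x_val)

# ELU: x for x > 0, alpha * (exp(x) - 1) for x <= 0
desired = x_val * (x_val > 0) + alpha * (np.exp(x_val) - 1) * (x_val < 0)
np.testing.assert_allclose(actual, desired, rtol=1e-5)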
