Commit: Compile

pchavanne committed Feb 23, 2017
1 parent 01d690b commit a6bc7ca
Showing 2 changed files with 200 additions and 23 deletions.
4 changes: 2 additions & 2 deletions examples/networks.py
@@ -533,7 +533,7 @@ def rnn(input_var=None):

    # Create connected layers
    l_in = InputLayer(input_shape=(None, 28 * 28), input_var=input_var, name='Input')
-    l_rnn = RNN(incoming=l_in, n_hidden=100, n_out=28 * 28, name='Recurrent Neural Network')
+    l_rnn = RNN(incoming=l_in, n_units=100, name='Recurrent Neural Network')
    l_out = LogisticRegression(incoming=l_rnn, n_class=10, name='Logistic regression')

    # Create network and add layers
@@ -557,7 +557,7 @@ def lstm(input_var=None):

    # Create connected layers
    l_in = InputLayer(input_shape=(None, 28 * 28), input_var=input_var, name='Input')
-    l_lstm = LSTM(incoming=l_in, n_hidden=100, n_out=28 * 28, name='Long Short Term Memory')
+    l_lstm = LSTM(incoming=l_in, n_units=100, name='Long Short Term Memory')
    l_out = LogisticRegression(incoming=l_lstm, n_class=10, name='Logistic regression')

    # Create network and add layers
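
Both examples now construct the recurrent layer from n_units alone: the old n_hidden and n_out arguments are dropped, and the output dimension is set by the layer stacked on top. A minimal sketch of the updated call pattern, assuming these classes live in yadll.layers as the layers in the test file below do:

    from yadll.layers import InputLayer, RNN, LogisticRegression

    # n_units is the hidden-state size; the classifier maps it to 10 classes.
    l_in = InputLayer(input_shape=(None, 28 * 28), input_var=None, name='Input')
    l_rnn = RNN(incoming=l_in, n_units=100, name='Recurrent Neural Network')
    l_out = LogisticRegression(incoming=l_rnn, n_class=10, name='Logistic regression')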
219 changes: 198 additions & 21 deletions tests/test_layers.py
@@ -11,11 +11,22 @@ def layer(self):
        from yadll.layers import Layer
        return Layer(mock())

    @pytest.fixture
    def layer2(self):
        from yadll.layers import Layer
        return Layer(mock())

    @pytest.fixture
    def named_layer(self):
        from yadll.layers import Layer
        return Layer(mock(), name='layer_name')

    def test_layer_from_none(self):
        from yadll.layers import Layer
        layer = Layer(incoming=None)
        assert layer.input_layer is None
        assert layer.input_shape is None

    def test_input_shape(self, layer):
        assert layer.input_shape == layer.input_layer.output_shape

@@ -46,6 +57,14 @@ def test_layer_from_shape(self, layer_from_shape):
        assert layer.input_layer is None
        assert layer.input_shape == (None, 20)

    def test_layer_from_layers(self):
        from yadll.layers import Layer
        l_1 = Layer((10, 20))
        l_2 = Layer((30, 40))
        l = Layer([l_1, l_2])
        assert l.input_shape == [l_1.input_shape, l_2.input_shape]
        assert l.input_layer == [l_1, l_2]


class Testinput_layer:
    @pytest.fixture
@@ -112,11 +131,11 @@ def input_layer(self, input_data):
        return InputLayer(shape, input=input_data)

    def test_output_shape(self, flatten_layer, input_layer):
-        layer = flatten_layer(input_layer)
+        layer = flatten_layer(incoming=input_layer)
        assert layer.output_shape == (2, 3 * 4 * 5)

    def test_get_output(self, flatten_layer, input_layer, input_data):
-        layer = flatten_layer(input_layer)
+        layer = flatten_layer(incoming=input_layer)
        result = layer.get_output().eval()
        input = np.asarray(input_data.eval())
        assert (result == input.reshape(input.shape[0], -1)).all()
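
For concreteness: with the (2, 3, 4, 5) input used by these fixtures, flattening keeps the batch axis and collapses the rest, so the expected shape is (2, 60), which is exactly what input.reshape(input.shape[0], -1) computes. The same arithmetic in plain NumPy, independent of yadll:

    import numpy as np

    x = np.random.random((2, 3, 4, 5))
    flat = x.reshape(x.shape[0], -1)      # keep batch axis, merge the rest
    assert flat.shape == (2, 3 * 4 * 5)   # (2, 60)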
@@ -140,7 +159,7 @@ def input_layer(self, input_data):
        return InputLayer(shape, input=input_data)

    def test_output_shape(self, activation, input_layer):
-        layer = activation(input_layer)
+        layer = activation(incoming=input_layer)
        assert layer.output_shape == (2, 3 * 4 * 5)

    def test_get_output(self, activation, input_layer, input_data):
@@ -170,11 +189,11 @@ def input_layer(self, input_data):

    @pytest.fixture
    def layer(self, dense_layer, input_layer):
-        return dense_layer(input_layer, n_units=2, l1=1, l2=2)
+        return dense_layer(incoming=input_layer, n_units=2, l1=1, l2=2)

    @pytest.fixture
    def layer_from_layer(self, dense_layer, input_layer, layer):
-        return dense_layer(input_layer, W=layer.W, b=layer.b, n_units=2, l1=1, l2=2)
+        return dense_layer(incoming=input_layer, W=layer.W, b=layer.b, n_units=2, l1=1, l2=2)

    def test_get_params(self, layer):
        assert layer.get_params() == [layer.W, layer.b]
@@ -221,12 +240,12 @@ def hp(self):
        hp('batch_size', 10)
        hp('n_epochs', 10)
        hp('learning_rate', 0.1)
-        hp('patience', 1000)
+        hp('patience', 100)
        return hp

    @pytest.fixture
    def layer(self, unsupervised_layer, input_layer, hp):
-        return unsupervised_layer(input_layer, n_units=2, hyperparameters=hp)
+        return unsupervised_layer(incoming=input_layer, n_units=2, hyperparameters=hp)

    def test_get_params(self, layer):
        assert layer.get_params() == [layer.W, layer.b]
@@ -275,17 +294,17 @@ def input_layer(self, input_data):

    @pytest.fixture
    def layer(self, dropout, input_layer):
-        return dropout(input_layer, corruption_level=0.5)
+        return dropout(incoming=input_layer, corruption_level=0.5)

    @pytest.fixture
    def layer_c0(self, dropout, input_layer):
-        return dropout(input_layer, corruption_level=0)
+        return dropout(incoming=input_layer, corruption_level=0)

    @pytest.fixture
    def layer_c1(self, dropout, input_layer):
-        return dropout(input_layer, corruption_level=1)
+        return dropout(incoming=input_layer, corruption_level=1)

-    def test_get_output(self, input_layer, layer, layer_c0, layer_c1):
+    def test_get_output(self, input_layer, layer_c0, layer_c1):
        np.testing.assert_array_equal(input_layer.get_output().eval(), layer_c0.get_output().eval())
        assert np.all(layer_c1.get_output().eval() == 0)
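
The rewritten test asserts only the deterministic extremes: corruption_level=0 must return the input unchanged, and corruption_level=1 must zero everything; the stochastic corruption_level=0.5 fixture is no longer asserted on. A property-style check is about the strongest option for that case; a sketch, assuming the layer fixture above and that get_output() draws a fresh random mask per evaluation:

    # Hypothetical check, not part of this commit: with corruption_level=0.5,
    # a single sample should zero some activations but not all of them.
    out = layer.get_output().eval()
    zero_fraction = (out == 0).mean()
    assert 0.0 < zero_fraction < 1.0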

@@ -309,17 +328,17 @@ def input_layer(self, input_data):

    @pytest.fixture
    def layer(self, dropconnect, input_layer):
-        return dropconnect(input_layer, n_units=10, corruption_level=0.5)
+        return dropconnect(incoming=input_layer, n_units=10, corruption_level=0.5)

    @pytest.fixture
    def layer_c0(self, dropconnect, input_layer):
-        return dropconnect(input_layer, n_units=10, corruption_level=0)
+        return dropconnect(incoming=input_layer, n_units=10, corruption_level=0)

    @pytest.fixture
    def layer_c1(self, dropconnect, input_layer):
-        return dropconnect(input_layer, n_units=10, corruption_level=1)
+        return dropconnect(incoming=input_layer, n_units=10, corruption_level=1)

-    def test_get_output(self, input_layer, layer, layer_c0, layer_c1):
+    def test_get_output(self, layer_c1):
        assert np.all(layer_c1.get_output().eval() == 0)


@@ -342,7 +361,10 @@ def input_layer(self, input_data):

    @pytest.fixture
    def layer(self, pool_layer, input_layer):
-        return pool_layer(input_layer, poolsize=(2, 2))
+        return pool_layer(incoming=input_layer, poolsize=(2, 2))

    def test_get_output(self, layer):
        output = layer.get_output().eval()


class TestConvLayer:
@@ -351,40 +373,141 @@ def conv_layer(self):
        from yadll.layers import ConvLayer
        return ConvLayer

    @pytest.fixture
    def input_data(self):
        from yadll.utils import shared_variable
        return shared_variable(np.random.random((100, 1, 28, 28)))

    @pytest.fixture
    def input_layer(self, input_data):
        from yadll.layers import InputLayer
        shape = (None, 1, 28, 28)
        return InputLayer(shape, input=input_data)

    @pytest.fixture
    def layer(self, conv_layer, input_layer):
        return conv_layer(incoming=input_layer, image_shape=(None, 1, 28, 28), filter_shape=(20, 1, 5, 5))

    def test_get_output(self, layer):
        output = layer.get_output().eval()


class TestConvPoolLayer:
    @pytest.fixture
    def conv_pool_layer(self):
        from yadll.layers import ConvPoolLayer
        return ConvPoolLayer

    @pytest.fixture
    def input_data(self):
        from yadll.utils import shared_variable
        return shared_variable(np.random.random((100, 1, 28, 28)))

    @pytest.fixture
    def input_layer(self, input_data):
        from yadll.layers import InputLayer
        shape = (None, 1, 28, 28)
        return InputLayer(shape, input=input_data)

    @pytest.fixture
    def layer(self, conv_pool_layer, input_layer):
        return conv_pool_layer(incoming=input_layer, image_shape=(None, 1, 28, 28), filter_shape=(20, 1, 5, 5), poolsize=(2, 2))

    def test_get_output(self, layer):
        output = layer.get_output().eval()
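
The fixture shapes follow valid-convolution arithmetic (Theano's default border mode): a 28x28 input convolved with 5x5 filters yields 24x24 feature maps, and 2x2 pooling halves that to 12x12 over the 20 filters, i.e. one (20, 12, 12) volume per image. Spelled out:

    # Valid convolution (no padding, stride 1) followed by 2x2 pooling:
    conv_side = 28 - 5 + 1        # 24
    pool_side = conv_side // 2    # 12
    assert (conv_side, pool_side) == (24, 12)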


class TestAutoEncoder:
    @pytest.fixture
    def auto_encoder(self):
        from yadll.layers import AutoEncoder
        return AutoEncoder

    @pytest.fixture
    def input_data(self):
        from yadll.utils import shared_variable
        return shared_variable(np.random.random((10, 20)))

    @pytest.fixture
    def input_layer(self, input_data):
        from yadll.layers import InputLayer
        shape = (None, 20)
        return InputLayer(shape, input=input_data)

    @pytest.fixture
    def unsupervised_hp(self):
        from yadll.hyperparameters import Hyperparameters
        hp = Hyperparameters()
        hp('batch_size', 10)
        hp('n_epochs', 15)
        hp('learning_rate', 0.01)
        return hp

    @pytest.fixture
    def layer(self, auto_encoder, input_layer, unsupervised_hp):
        return auto_encoder(incoming=input_layer, n_units=10, hyperparameters=unsupervised_hp)

    def test_get_output(self, layer):
        output = layer.get_output().eval()


class TestRBM:
    @pytest.fixture
    def rbm(self):
        from yadll.layers import RBM
        return RBM

    @pytest.fixture
    def input_data(self):
        from yadll.utils import shared_variable
        return shared_variable(np.random.random((10, 20)))

    @pytest.fixture
    def input_layer(self, input_data):
        from yadll.layers import InputLayer
        shape = (None, 20)
        return InputLayer(shape, input=input_data)

    @pytest.fixture
    def unsupervised_hp(self):
        from yadll.hyperparameters import Hyperparameters
        hp = Hyperparameters()
        hp('batch_size', 10)
        hp('n_epochs', 15)
        hp('learning_rate', 0.01)
        return hp

    @pytest.fixture
    def layer(self, rbm, input_layer, unsupervised_hp):
        return rbm(incoming=input_layer, n_units=10, hyperparameters=unsupervised_hp)

    def test_get_output(self, layer):
        output = layer.get_output().eval()


class TestBatchNormalization:
    @pytest.fixture
    def batch_normalization(self):
        from yadll.layers import BatchNormalization
        return BatchNormalization

    @pytest.fixture
    def input_data(self):
        from yadll.utils import shared_variable
        return shared_variable(np.random.random((10, 20)))

-class TestLayerNormalization:
-    @pytest.fixture
-    def layer_normalization(self):
-        from yadll.layers import BatchNormalization
-        return BatchNormalization

    @pytest.fixture
    def input_layer(self, input_data):
        from yadll.layers import InputLayer
        shape = (10, 20)
        return InputLayer(shape, input=input_data)

    @pytest.fixture
    def layer(self, batch_normalization, input_layer):
        return batch_normalization(incoming=input_layer)

    def test_get_output(self, layer):
        output = layer.get_output().eval()


class TestRNN:
@@ -393,16 +516,70 @@ def rnn(self):
        from yadll.layers import RNN
        return RNN

    @pytest.fixture
    def input_data(self):
        from yadll.utils import shared_variable
        return shared_variable(np.random.random((10, 20, 30)))

    @pytest.fixture
    def input_layer(self, input_data):
        from yadll.layers import InputLayer
        shape = (None, 20, 30)
        return InputLayer(shape, input=input_data)

    @pytest.fixture
    def layer(self, rnn, input_layer):
        return rnn(incoming=input_layer, n_units=100)

    def test_get_output(self, layer):
        output = layer.get_output().eval()


class TestLSTM:
    @pytest.fixture
    def lstm(self):
        from yadll.layers import LSTM
        return LSTM

    @pytest.fixture
    def input_data(self):
        from yadll.utils import shared_variable
        return shared_variable(np.random.random((10, 20, 30)))

    @pytest.fixture
    def input_layer(self, input_data):
        from yadll.layers import InputLayer
        shape = (None, 20, 30)
        return InputLayer(shape, input=input_data)

    @pytest.fixture
    def layer(self, lstm, input_layer):
        return lstm(incoming=input_layer, n_units=10)

    def test_get_output(self, layer):
        output = layer.get_output().eval()


class TestGRU:
    @pytest.fixture
    def gru(self):
        from yadll.layers import GRU
        return GRU

    @pytest.fixture
    def input_data(self):
        from yadll.utils import shared_variable
        return shared_variable(np.random.random((10, 20, 30)))

    @pytest.fixture
    def input_layer(self, input_data):
        from yadll.layers import InputLayer
        shape = (None, 20, 30)
        return InputLayer(shape, input=input_data)

    @pytest.fixture
    def layer(self, gru, input_layer):
        return gru(incoming=input_layer, n_units=10)

    def test_get_output(self, layer):
        output = layer.get_output().eval()
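
All three recurrent tests (RNN, LSTM, GRU) are smoke tests: they confirm that get_output() compiles and evaluates without raising, but assert nothing about the result. A sketch of a stronger check, under the unverified assumption that these layers return only the final hidden state with shape (batch_size, n_units):

    # Hypothetical assertion, not part of this commit. For the GRU fixture above,
    # batch_size is 10 and n_units is 10, so the final hidden state would be:
    out = layer.get_output().eval()
    assert out.shape == (10, 10)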
