This repository has been archived by the owner on Jul 10, 2021. It is now read-only.

Commit 99f9155
Making the new Layer-based construction syntax mandatory, porting all of the tests accordingly.
alexjc committed Apr 27, 2015
1 parent 354dbbe commit 99f9155
Showing 10 changed files with 66 additions and 56 deletions.
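
In short: the tuple shorthand for specifying layers is gone, and every layer must now be an explicit `sknn.mlp.Layer` instance with its parameters passed as keyword arguments. A minimal before/after sketch, using only names that appear in this commit:

    from sknn.mlp import MultiLayerPerceptronRegressor as MLPR
    from sknn.mlp import Layer as L

    # Old tuple shorthand, rejected after this commit:
    #   nn = MLPR(layers=[("Rectifier", 16), ("Linear",)], n_iter=1)

    # New mandatory Layer-based syntax:
    nn = MLPR(layers=[L("Rectifier", units=16), L("Linear")], n_iter=1)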
10 changes: 5 additions & 5 deletions sknn/mlp.py
@@ -48,6 +48,7 @@ class Layer(object):
     def __init__(
             self,
             type,
+            nop=None,
             name=None,
             units=None,
             pieces=None,
@@ -104,6 +105,8 @@ def __init__(
         means that 25% of the inputs will be excluded for each training sample, with the
         remaining inputs being renormalized accordingly.
         """
+        assert nop is None,\
+            "Specify layer parameters as keyword arguments, not positional arguments."
 
         self.name = name
         self.type = type
@@ -212,16 +215,13 @@ def __init__(
 
         self.layers = []
         for i, layer in enumerate(layers):
-            if isinstance(layer, tuple):
-                if len(layer) == 1:
-                    layer = (layer[0], None)
-                layer = Layer(layer[0], units=layer[1])
+            assert isinstance(layer, Layer),\
+                "Specify each layer as an instance of a `sknn.mlp.Layer` object."
 
             if layer.name is None:
                 label = "Hidden" if i < len(layers)-1 else "Output"
                 layer.name = "%s_%i_%s" % (label, i, layer.type)
 
-            assert type(layer) == Layer
             self.layers.append(layer)
 
         self.random_state = random_state
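
The new `nop` parameter is a sentinel rather than a real option: it sits immediately after `type` in the signature, so any stray positional argument binds to it and trips the new assertion instead of being silently misread as `name`. A small sketch of the effect, assuming the signature above:

    from sknn.mlp import Layer

    Layer("Rectifier", units=16)   # OK: parameters passed by keyword.
    Layer("Rectifier", 16)         # 16 binds to `nop`, raising AssertionError:
                                   # "Specify layer parameters as keyword
                                   # arguments, not positional arguments."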
17 changes: 9 additions & 8 deletions sknn/tests/test_classifier.py
@@ -5,12 +5,13 @@
 from sklearn.base import clone
 
 from sknn.mlp import MultiLayerPerceptronClassifier as MLPC
+from sknn.mlp import Layer as L
 
 
 class TestClassifierFunctionality(unittest.TestCase):
 
     def setUp(self):
-        self.nn = MLPC(layers=[("Linear",)], n_iter=1)
+        self.nn = MLPC(layers=[L("Linear")], n_iter=1)
 
     def test_FitAutoInitialize(self):
         a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,), dtype=numpy.int32)
@@ -49,7 +50,7 @@ def test_CalculateScore(self):
 class TestClassifierClone(TestClassifierFunctionality):
 
     def setUp(self):
-        cc = MLPC(layers=[("Linear",)], n_iter=1)
+        cc = MLPC(layers=[L("Linear")], n_iter=1)
         self.nn = clone(cc)
 
     # This runs the same tests on the clone as for the original above.
@@ -65,13 +66,13 @@ def check_values(self, params):
         assert_equal(params['valid_size'], 0.2)
 
     def test_GetParamValues(self):
-        nn = MLPC(layers=[("Linear",)], learning_rate=0.05, n_iter=456,
+        nn = MLPC(layers=[L("Linear")], learning_rate=0.05, n_iter=456,
                   n_stable=123, valid_size=0.2, dropout=0.25)
         params = nn.get_params()
         self.check_values(params)
 
     def test_CloneWithValues(self):
-        nn = MLPC(layers=[("Linear",)], learning_rate=0.05, n_iter=456,
+        nn = MLPC(layers=[L("Linear")], learning_rate=0.05, n_iter=456,
                   n_stable=123, valid_size=0.2, dropout=0.25)
         cc = clone(nn)
         params = cc.get_params()
@@ -85,20 +86,20 @@ def check_defaults(self, params):
         assert_equal(params['valid_size'], 0.0)
 
     def test_GetParamDefaults(self):
-        nn = MLPC(layers=[("Gaussian",)])
+        nn = MLPC(layers=[L("Gaussian")])
         params = nn.get_params()
         self.check_defaults(params)
 
     def test_CloneDefaults(self):
-        nn = MLPC(layers=[("Gaussian",)])
+        nn = MLPC(layers=[L("Gaussian")])
         cc = clone(nn)
         params = cc.get_params()
         self.check_defaults(params)
 
     def test_ConvertToString(self):
-        nn = MLPC(layers=[("Gaussian",)])
+        nn = MLPC(layers=[L("Gaussian")])
         assert_equal(str, type(str(nn)))
 
     def test_Representation(self):
-        nn = MLPC(layers=[("Gaussian",)])
+        nn = MLPC(layers=[L("Gaussian")])
         assert_equal(str, type(repr(nn)))
18 changes: 9 additions & 9 deletions sknn/tests/test_conv.py
@@ -19,42 +19,42 @@ def test_SquareKernel(self):
         self._run(MLPR(
             layers=[
                 L("Convolution", channels=4, kernel_shape=(3,3)),
-                ("Linear",)],
+                L("Linear")],
             n_iter=1))
 
     def test_KernelPooling(self):
         self._run(MLPR(
             layers=[
                 L("Convolution", channels=4, kernel_shape=(3,3), pool_shape=(2,2)),
-                ("Linear",)],
+                L("Linear")],
             n_iter=1))
 
     def test_VerticalKernel(self):
         self._run(MLPR(
             layers=[
                 L("Convolution", channels=4, kernel_shape=(16,1)),
-                ("Linear",)],
+                L("Linear")],
             n_iter=1))
 
     def test_VerticalVerbose(self):
         self._run(MLPR(
             layers=[
                 L("Convolution", channels=4, kernel_shape=(16,1)),
-                ("Linear",)],
+                L("Linear")],
             n_iter=1, verbose=1, valid_size=0.1))
 
     def test_HorizontalKernel(self):
         self._run(MLPR(
             layers=[
                 L("Convolution", channels=4, kernel_shape=(1,16)),
-                ("Linear",)],
+                L("Linear")],
             n_iter=1))
 
     def test_ValidationSet(self):
         self._run(MLPR(
             layers=[
                 L("Convolution", channels=4, kernel_shape=(3,3)),
-                ("Linear",)],
+                L("Linear")],
             n_iter=1,
             valid_size=0.5))
 
@@ -64,23 +64,23 @@ def test_MultipleLayers(self):
                 L("Convolution", channels=6, kernel_shape=(3,3)),
                 L("Convolution", channels=4, kernel_shape=(5,5)),
                 L("Convolution", channels=8, kernel_shape=(3,3)),
-                ("Linear",)],
+                L("Linear")],
             n_iter=1))
 
     def test_PoolingMaxType(self):
         self._run(MLPR(
             layers=[
                 L("Convolution", channels=4, kernel_shape=(3,3),
                   pool_shape=(2,2), pool_type='max'),
-                ("Linear",)],
+                L("Linear")],
             n_iter=1))
 
     def test_PoolingMeanType(self):
         self._run(MLPR(
             layers=[
                 L("Convolution", channels=4, kernel_shape=(3,3),
                   pool_shape=(2,2), pool_type='mean'),
-                ("Linear",)],
+                L("Linear")],
             n_iter=1))
18 changes: 9 additions & 9 deletions sknn/tests/test_deep.py
@@ -17,20 +17,20 @@ class TestDeepNetwork(test_linear.TestLinearNetwork):
     def setUp(self):
         self.nn = MLPR(
             layers=[
-                ("Rectifier", 16),
-                ("Sigmoid", 12),
+                L("Rectifier", units=16),
+                L("Sigmoid", units=12),
                 L("Maxout", units=16, pieces=2),
-                ("Tanh", 4),
-                ("Linear",)],
+                L("Tanh", units=4),
+                L("Linear")],
             n_iter=1)
 
     def test_UnknownOuputActivation(self):
-        nn = MLPR(layers=[("Unknown", 16)])
+        nn = MLPR(layers=[L("Unknown", units=16)])
         a_in = numpy.zeros((8,16))
         assert_raises(NotImplementedError, nn.fit, a_in, a_in)
 
     def test_UnknownHiddenActivation(self):
-        nn = MLPR(layers=[("Unknown", 8), ("Linear",)])
+        nn = MLPR(layers=[L("Unknown", units=8), L("Linear")])
         a_in = numpy.zeros((8,16))
         assert_raises(NotImplementedError, nn.fit, a_in, a_in)
 
@@ -45,22 +45,22 @@ def setUp(self):
 
     def run_EqualityTest(self, copier, asserter):
         for activation in ["Rectifier", "Sigmoid", "Maxout", "Tanh"]:
-            nn1 = MLPR(layers=[L(activation, units=16, pieces=2), ("Linear", 1)], random_state=1234)
+            nn1 = MLPR(layers=[L(activation, units=16, pieces=2), L("Linear", units=1)], random_state=1234)
             nn1._initialize(self.a_in, self.a_out)
 
             nn2 = copier(nn1, activation)
             asserter(numpy.all(nn1.predict(self.a_in) == nn2.predict(self.a_in)))
 
     def test_DifferentSeedPredictNotEquals(self):
         def ctor(_, activation):
-            nn = MLPR(layers=[L(activation, units=16, pieces=2), ("Linear", 1)], random_state=2345)
+            nn = MLPR(layers=[L(activation, units=16, pieces=2), L("Linear", units=1)], random_state=2345)
             nn._initialize(self.a_in, self.a_out)
             return nn
         self.run_EqualityTest(ctor, assert_false)
 
     def test_SameSeedPredictEquals(self):
         def ctor(_, activation):
-            nn = MLPR(layers=[L(activation, units=16, pieces=2), ("Linear", 1)], random_state=1234)
+            nn = MLPR(layers=[L(activation, units=16, pieces=2), L("Linear", units=1)], random_state=1234)
             nn._initialize(self.a_in, self.a_out)
             return nn
         self.run_EqualityTest(ctor, assert_true)
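
The equality tests above hinge on `random_state`: two networks initialized with the same seed should predict identically, and different seeds should not. A rough sketch of that check, with assumed input/output shapes and using the private `_initialize` hook exactly as the tests do:

    import numpy

    a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,1))   # shapes assumed
    nn1 = MLPR(layers=[L("Sigmoid", units=16, pieces=2), L("Linear", units=1)], random_state=1234)
    nn2 = MLPR(layers=[L("Sigmoid", units=16, pieces=2), L("Linear", units=1)], random_state=1234)
    nn1._initialize(a_in, a_out)
    nn2._initialize(a_in, a_out)
    assert numpy.all(nn1.predict(a_in) == nn2.predict(a_in))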
9 changes: 5 additions & 4 deletions sknn/tests/test_linear.py
@@ -7,12 +7,13 @@
 import numpy
 
 from sknn.mlp import MultiLayerPerceptronRegressor as MLPR
+from sknn.mlp import Layer as L
 
 
 class TestLinearNetwork(unittest.TestCase):
 
     def setUp(self):
-        self.nn = MLPR(layers=[("Linear",)], n_iter=1)
+        self.nn = MLPR(layers=[L("Linear")], n_iter=1)
 
     def test_LifeCycle(self):
         del self.nn
@@ -34,7 +35,7 @@ def test_FitWrongSize(self):
 class TestInputOutputs(unittest.TestCase):
 
     def setUp(self):
-        self.nn = MLPR(layers=[("Linear",)], n_iter=1)
+        self.nn = MLPR(layers=[L("Linear")], n_iter=1)
 
     def test_FitOneDimensional(self):
         a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,))
@@ -44,7 +45,7 @@ def test_FitOneDimensional(self):
 class TestSerialization(unittest.TestCase):
 
     def setUp(self):
-        self.nn = MLPR(layers=[("Linear",)], n_iter=1)
+        self.nn = MLPR(layers=[L("Linear")], n_iter=1)
 
     def test_SerializeFail(self):
         buf = io.BytesIO()
@@ -67,7 +68,7 @@ def test_SerializeCorrect(self):
 class TestSerializedNetwork(TestLinearNetwork):
 
     def setUp(self):
-        self.original = MLPR(layers=[("Linear",)])
+        self.original = MLPR(layers=[L("Linear")])
         a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
         self.original._initialize(a_in, a_out)
7 changes: 4 additions & 3 deletions sknn/tests/test_output.py
@@ -1,18 +1,19 @@
 import unittest
 from nose.tools import (assert_is_not_none, assert_raises, assert_equal)
 
-from sknn.mlp import MultiLayerPerceptronRegressor as MLPR, MultiLayerPerceptronClassifier as MLPC
+from sknn.mlp import MultiLayerPerceptronRegressor as MLPR
+from sknn.mlp import Layer as L
 
 from . import test_linear
 
 
 class TestGaussianOutput(test_linear.TestLinearNetwork):
 
     def setUp(self):
-        self.nn = MLPR(layers=[("Gaussian",)], n_iter=1)
+        self.nn = MLPR(layers=[L("Gaussian")], n_iter=1)
 
 
 class TestSoftmaxOutput(test_linear.TestLinearNetwork):
 
     def setUp(self):
-        self.nn = MLPR(layers=[("Softmax",)], n_iter=1)
+        self.nn = MLPR(layers=[L("Softmax")], n_iter=1)
5 changes: 3 additions & 2 deletions sknn/tests/test_pipeline.py
@@ -9,6 +9,7 @@
 from sklearn.preprocessing import MinMaxScaler
 
 from sknn.mlp import MultiLayerPerceptronRegressor as MLPR
+from sknn.mlp import Layer as L
 
 
 class TestPipeline(unittest.TestCase):
@@ -20,14 +21,14 @@ def _run(self, pipeline):
 
     def test_NeuralNetworkOnly(self):
         pipeline = Pipeline([
-            ('neural network', MLPR(layers=[("Linear",)], n_iter=1))
+            ('neural network', MLPR(layers=[L("Linear")], n_iter=1))
         ])
         self._run(pipeline)
 
     def test_ScalerThenNeuralNetwork(self):
         pipeline = Pipeline([
             ('min/max scaler', MinMaxScaler()),
-            ('neural network', MLPR(layers=[("Linear",)], n_iter=1))
+            ('neural network', MLPR(layers=[L("Linear")], n_iter=1))
         ])
         self._run(pipeline)
16 changes: 8 additions & 8 deletions sknn/tests/test_rules.py
@@ -10,17 +10,17 @@
 class TestLearningRules(unittest.TestCase):
 
     def test_Default(self):
-        self._run(MLPR(layers=[("Linear",)],
+        self._run(MLPR(layers=[L("Linear")],
                        learning_rule='sgd',
                        n_iter=1))
 
     def test_Momentum(self):
-        self._run(MLPR(layers=[("Linear",)],
+        self._run(MLPR(layers=[L("Linear")],
                        learning_rule='momentum',
                        n_iter=1))
 
     def test_Nesterov(self):
-        self._run(MLPR(layers=[("Linear",)],
+        self._run(MLPR(layers=[L("Linear")],
                        learning_rule='nesterov',
                        n_iter=1))
 
@@ -33,27 +33,27 @@ def test_Nesterov(self):
     #                n_iter=1))
 
     def test_AdaDelta(self):
-        self._run(MLPR(layers=[("Linear",)],
+        self._run(MLPR(layers=[L("Linear")],
                        learning_rule='adadelta',
                        n_iter=1))
 
     def test_RmsProp(self):
-        self._run(MLPR(layers=[("Linear",)],
+        self._run(MLPR(layers=[L("Linear")],
                        learning_rule='rmsprop',
                        n_iter=1))
 
     def test_DropoutAsBool(self):
-        self._run(MLPR(layers=[("Sigmoid", 8), ("Linear",)],
+        self._run(MLPR(layers=[L("Sigmoid", units=8), L("Linear")],
                        dropout=True,
                        n_iter=1))
 
     def test_DropoutAsFloat(self):
-        self._run(MLPR(layers=[("Tanh", 8), ("Linear",)],
+        self._run(MLPR(layers=[L("Tanh", units=8), L("Linear",)],
                        dropout=0.25,
                        n_iter=1))
 
     def test_DropoutPerLayer(self):
-        self._run(MLPR(layers=[L("Tanh", units=8, dropout=0.25), ("Linear",)],
+        self._run(MLPR(layers=[L("Tanh", units=8, dropout=0.25), L("Linear")],
                        dropout=True,
                        n_iter=1))
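
Taken together, the ported tests settle on a single idiom: every layer is an explicit `L(...)` call, with `units`, `pieces`, `kernel_shape`, `dropout`, and the rest always passed by keyword. A composite sketch drawn from the tests above (not itself a test in this commit):

    nn = MLPR(
        layers=[
            L("Rectifier", units=16),
            L("Maxout", units=16, pieces=2),
            L("Tanh", units=8, dropout=0.25),   # per-layer dropout, as in test_DropoutPerLayer
            L("Linear")],
        dropout=True,
        n_iter=1)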
