This repository has been archived by the owner on Jul 10, 2021. It is now read-only.

Commit

Merge bea3bba into c638eef
alexjc committed Apr 27, 2015
2 parents c638eef + bea3bba commit 91e4e39
Showing 10 changed files with 365 additions and 131 deletions.
259 changes: 198 additions & 61 deletions sknn/mlp.py

Large diffs are not rendered by default.
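The headline change in sknn/mlp.py (not rendered above) replaces tuple-based layer specifications with an explicit `Layer` type, imported in the tests as `L`. A minimal sketch of the new construction style, inferred from the updated tests below:

    from sknn.mlp import MultiLayerPerceptronRegressor as MLPR
    from sknn.mlp import Layer as L

    # Previously: layers=[("Rectifier", 16), ("Linear",)]
    # Now: named Layer objects with keyword parameters.
    nn = MLPR(
        layers=[
            L("Rectifier", units=16),
            L("Linear")],
        n_iter=10)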

25 changes: 13 additions & 12 deletions sknn/tests/test_classifier.py
@@ -5,12 +5,13 @@
 from sklearn.base import clone
 
 from sknn.mlp import MultiLayerPerceptronClassifier as MLPC
+from sknn.mlp import Layer as L
 
 
 class TestClassifierFunctionality(unittest.TestCase):
 
     def setUp(self):
-        self.nn = MLPC(layers=[("Linear",)], n_iter=1)
+        self.nn = MLPC(layers=[L("Linear")], n_iter=1)
 
     def test_FitAutoInitialize(self):
         a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,), dtype=numpy.int32)
@@ -49,7 +50,7 @@ def test_CalculateScore(self):
 class TestClassifierClone(TestClassifierFunctionality):
 
     def setUp(self):
-        cc = MLPC(layers=[("Linear",)], n_iter=1)
+        cc = MLPC(layers=[L("Linear")], n_iter=1)
         self.nn = clone(cc)
 
     # This runs the same tests on the clone as for the original above.
@@ -61,18 +62,18 @@ def check_values(self, params):
         assert_equal(params['learning_rate'], 0.05)
         assert_equal(params['n_iter'], 456)
         assert_equal(params['n_stable'], 123)
-        assert_equal(params['dropout'], True)
+        assert_equal(params['dropout'], 0.25)
         assert_equal(params['valid_size'], 0.2)
 
     def test_GetParamValues(self):
-        nn = MLPC(layers=[("Linear",)], learning_rate=0.05, n_iter=456,
-                  n_stable=123, valid_size=0.2, dropout=True)
+        nn = MLPC(layers=[L("Linear")], learning_rate=0.05, n_iter=456,
+                  n_stable=123, valid_size=0.2, dropout=0.25)
         params = nn.get_params()
         self.check_values(params)
 
     def test_CloneWithValues(self):
-        nn = MLPC(layers=[("Linear",)], learning_rate=0.05, n_iter=456,
-                  n_stable=123, valid_size=0.2, dropout=True)
+        nn = MLPC(layers=[L("Linear")], learning_rate=0.05, n_iter=456,
+                  n_stable=123, valid_size=0.2, dropout=0.25)
         cc = clone(nn)
         params = cc.get_params()
         self.check_values(params)
@@ -81,24 +82,24 @@ def check_defaults(self, params):
         assert_equal(params['learning_rate'], 0.01)
         assert_equal(params['n_iter'], None)
         assert_equal(params['n_stable'], 50)
-        assert_equal(params['dropout'], False)
+        assert_equal(params['dropout'], 0.0)
         assert_equal(params['valid_size'], 0.0)
 
     def test_GetParamDefaults(self):
-        nn = MLPC(layers=[("Gaussian",)])
+        nn = MLPC(layers=[L("Gaussian")])
         params = nn.get_params()
         self.check_defaults(params)
 
     def test_CloneDefaults(self):
-        nn = MLPC(layers=[("Gaussian",)])
+        nn = MLPC(layers=[L("Gaussian")])
         cc = clone(nn)
         params = cc.get_params()
         self.check_defaults(params)
 
     def test_ConvertToString(self):
-        nn = MLPC(layers=[("Gaussian",)])
+        nn = MLPC(layers=[L("Gaussian")])
         assert_equal(str, type(str(nn)))
 
     def test_Representation(self):
-        nn = MLPC(layers=[("Gaussian",)])
+        nn = MLPC(layers=[L("Gaussian")])
         assert_equal(str, type(repr(nn)))
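Besides the `Layer` migration, these tests pin down that `dropout` is now a float fraction (default 0.0) rather than a boolean (default False). A minimal sketch of the new usage; the layer sizes here are hypothetical:

    from sknn.mlp import MultiLayerPerceptronClassifier as MLPC
    from sknn.mlp import Layer as L

    # dropout now takes the fraction of units to drop during training;
    # 0.0 (the new default) disables dropout entirely.
    nn = MLPC(layers=[L("Rectifier", units=32), L("Linear")],
              dropout=0.25, n_iter=10)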
54 changes: 38 additions & 16 deletions sknn/tests/test_conv.py
@@ -4,6 +4,7 @@
 import numpy
 
 from sknn.mlp import MultiLayerPerceptronRegressor as MLPR
+from sknn.mlp import Layer as L
 
 
 class TestConvolution(unittest.TestCase):
@@ -17,47 +18,69 @@ def _run(self, nn):
     def test_SquareKernel(self):
         self._run(MLPR(
             layers=[
-                ("Convolution", 4, (2,2)),
-                ("Linear",)],
+                L("Convolution", channels=4, kernel_shape=(3,3)),
+                L("Linear")],
             n_iter=1))
 
-    def test_VerticalKernel(self):
+    def test_KernelPooling(self):
         self._run(MLPR(
             layers=[
-                ("Convolution", 4, (16,1)),
-                ("Linear",)],
+                L("Convolution", channels=4, kernel_shape=(3,3), pool_shape=(2,2)),
+                L("Linear")],
             n_iter=1))
 
+    def test_VerticalKernel(self):
+        self._run(MLPR(
+            layers=[
+                L("Convolution", channels=4, kernel_shape=(16,1)),
+                L("Linear")],
+            n_iter=1))
+
     def test_VerticalVerbose(self):
         self._run(MLPR(
             layers=[
-                ("Convolution", 4, (16,1)),
-                ("Linear",)],
+                L("Convolution", channels=4, kernel_shape=(16,1)),
+                L("Linear")],
             n_iter=1, verbose=1, valid_size=0.1))
 
     def test_HorizontalKernel(self):
         self._run(MLPR(
             layers=[
-                ("Convolution", 4, (1,16)),
-                ("Linear",)],
+                L("Convolution", channels=4, kernel_shape=(1,16)),
+                L("Linear")],
             n_iter=1))
 
     def test_ValidationSet(self):
         self._run(MLPR(
             layers=[
-                ("Convolution", 4, (2,2)),
-                ("Linear",)],
+                L("Convolution", channels=4, kernel_shape=(3,3)),
+                L("Linear")],
             n_iter=1,
             valid_size=0.5))
 
     def test_MultipleLayers(self):
         self._run(MLPR(
             layers=[
-                ("Convolution", 6, (3,3)),
-                ("Convolution", 4, (5,5)),
-                ("Convolution", 8, (3,3)),
-                ("Linear",)],
+                L("Convolution", channels=6, kernel_shape=(3,3)),
+                L("Convolution", channels=4, kernel_shape=(5,5)),
+                L("Convolution", channels=8, kernel_shape=(3,3)),
+                L("Linear")],
             n_iter=1))
+
+    def test_PoolingMaxType(self):
+        self._run(MLPR(
+            layers=[
+                L("Convolution", channels=4, kernel_shape=(3,3),
+                  pool_shape=(2,2), pool_type='max'),
+                L("Linear")],
+            n_iter=1))
+
+    def test_PoolingMeanType(self):
+        self._run(MLPR(
+            layers=[
+                L("Convolution", channels=4, kernel_shape=(3,3),
+                  pool_shape=(2,2), pool_type='mean'),
+                L("Linear")],
+            n_iter=1))
@@ -68,4 +91,3 @@ def _run(self, nn):
         nn.fit(a_in, a_out)
         a_test = nn.predict(a_in)
         assert_equal(type(a_out), type(a_in))
-
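The tests above show the convolution layer's new keyword interface: `channels` and `kernel_shape`, with optional pooling via `pool_shape` and `pool_type` ('max' or 'mean'). A sketch under those assumptions; the specific sizes are hypothetical:

    from sknn.mlp import MultiLayerPerceptronRegressor as MLPR
    from sknn.mlp import Layer as L

    # A 3x3 convolution with 8 output channels and 2x2 max-pooling,
    # feeding a linear output layer.
    nn = MLPR(
        layers=[
            L("Convolution", channels=8, kernel_shape=(3,3),
              pool_shape=(2,2), pool_type='max'),
            L("Linear")],
        n_iter=5)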
58 changes: 47 additions & 11 deletions sknn/tests/test_deep.py
@@ -1,12 +1,18 @@
 import unittest
-from nose.tools import (assert_false, assert_raises, assert_true, assert_equal)
+from nose.tools import (assert_false, assert_raises, assert_true,
+                        assert_equal, assert_in)
 
 import io
 import pickle
 import numpy
+import logging
 
 from sklearn.base import clone
 
+import sknn
 from sknn.mlp import MultiLayerPerceptronRegressor as MLPR
+from sknn.mlp import Layer as L
 
 from . import test_linear
 
@@ -15,20 +21,23 @@ class TestDeepNetwork(test_linear.TestLinearNetwork):
     def setUp(self):
         self.nn = MLPR(
             layers=[
-                ("Rectifier", 16),
-                ("Sigmoid", 12),
-                ("Maxout", 8, 2),
-                ("Tanh", 4),
-                ("Linear",)],
+                L("Rectifier", units=16),
+                L("Sigmoid", units=12),
+                L("Maxout", units=16, pieces=2),
+                L("Tanh", units=4),
+                L("Linear")],
             n_iter=1)
 
+    def test_UnknownLayer(self):
+        assert_raises(NotImplementedError, L, "Unknown")
+
     def test_UnknownOuputActivation(self):
-        nn = MLPR(layers=[("Unknown", 8)])
+        nn = MLPR(layers=[L("Rectifier", units=16)])
         a_in = numpy.zeros((8,16))
         assert_raises(NotImplementedError, nn.fit, a_in, a_in)
 
     def test_UnknownHiddenActivation(self):
-        nn = MLPR(layers=[("Unknown", 8), ("Linear",)])
+        nn = MLPR(layers=[L("Gaussian", units=8), L("Linear")])
         a_in = numpy.zeros((8,16))
         assert_raises(NotImplementedError, nn.fit, a_in, a_in)
@@ -43,22 +52,22 @@ def setUp(self):
 
     def run_EqualityTest(self, copier, asserter):
         for activation in ["Rectifier", "Sigmoid", "Maxout", "Tanh"]:
-            nn1 = MLPR(layers=[(activation, 16, 2), ("Linear", 8)], random_state=1234)
+            nn1 = MLPR(layers=[L(activation, units=16, pieces=2), L("Linear", units=1)], random_state=1234)
             nn1._initialize(self.a_in, self.a_out)
 
             nn2 = copier(nn1, activation)
             asserter(numpy.all(nn1.predict(self.a_in) == nn2.predict(self.a_in)))
 
     def test_DifferentSeedPredictNotEquals(self):
         def ctor(_, activation):
-            nn = MLPR(layers=[(activation, 16, 2), ("Linear", 8)], random_state=2345)
+            nn = MLPR(layers=[L(activation, units=16, pieces=2), L("Linear", units=1)], random_state=2345)
             nn._initialize(self.a_in, self.a_out)
             return nn
         self.run_EqualityTest(ctor, assert_false)
 
     def test_SameSeedPredictEquals(self):
         def ctor(_, activation):
-            nn = MLPR(layers=[(activation, 16, 2), ("Linear", 8)], random_state=1234)
+            nn = MLPR(layers=[L(activation, units=16, pieces=2), L("Linear", units=1)], random_state=1234)
             nn._initialize(self.a_in, self.a_out)
             return nn
         self.run_EqualityTest(ctor, assert_true)
@@ -77,3 +86,30 @@ def serialize(nn, _):
             buf.seek(0)
             return pickle.load(buf)
         self.run_EqualityTest(serialize, assert_true)
+
+
+class TestActivations(unittest.TestCase):
+
+    def setUp(self):
+        self.buf = io.StringIO()
+        self.hnd = logging.StreamHandler(self.buf)
+        logging.getLogger('sknn').addHandler(self.hnd)
+        logging.getLogger().setLevel(logging.WARNING)
+
+    def tearDown(self):
+        assert_equal('', self.buf.getvalue())
+        sknn.mlp.log.removeHandler(self.hnd)
+
+    def test_MissingParameterException(self):
+        nn = MLPR(layers=[L("Maxout", units=32), L("Linear")])
+        a_in = numpy.zeros((8,16))
+        assert_raises(ValueError, nn._initialize, a_in, a_in)
+
+    def test_UnusedParameterWarning(self):
+        nn = MLPR(layers=[L("Linear", kernel_shape=(1,1))], n_iter=1)
+        a_in = numpy.zeros((8,16))
+        nn._initialize(a_in, a_in)
+
+        assert_in('Parameter `kernel_shape` is unused', self.buf.getvalue())
+        self.buf = io.StringIO()  # clear

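The new TestActivations class implies two validation behaviours for layer parameters: omitting a required parameter (`pieces` on a Maxout layer) raises ValueError at initialization, while an inapplicable parameter (`kernel_shape` on a Linear layer) is accepted but logged as a warning. A sketch of both, assuming fit() triggers the same initialization the tests invoke via _initialize():

    import numpy
    from sknn.mlp import MultiLayerPerceptronRegressor as MLPR
    from sknn.mlp import Layer as L

    a_in = numpy.zeros((8,16))

    # Maxout without `pieces`: expected to raise ValueError.
    nn = MLPR(layers=[L("Maxout", units=32), L("Linear")])
    try:
        nn.fit(a_in, a_in)
    except ValueError:
        pass  # missing required layer parameter

    # Unused parameter: runs, but logs a warning along the lines of
    # "Parameter `kernel_shape` is unused" to the 'sknn' logger.
    nn = MLPR(layers=[L("Linear", kernel_shape=(1,1))], n_iter=1)
    nn.fit(a_in, a_in)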
9 changes: 5 additions & 4 deletions sknn/tests/test_linear.py
@@ -7,12 +7,13 @@
 import numpy
 
 from sknn.mlp import MultiLayerPerceptronRegressor as MLPR
+from sknn.mlp import Layer as L
 
 
 class TestLinearNetwork(unittest.TestCase):
 
     def setUp(self):
-        self.nn = MLPR(layers=[("Linear",)], n_iter=1)
+        self.nn = MLPR(layers=[L("Linear")], n_iter=1)
 
     def test_LifeCycle(self):
         del self.nn
@@ -34,7 +35,7 @@ def test_FitWrongSize(self):
 class TestInputOutputs(unittest.TestCase):
 
     def setUp(self):
-        self.nn = MLPR(layers=[("Linear",)], n_iter=1)
+        self.nn = MLPR(layers=[L("Linear")], n_iter=1)
 
     def test_FitOneDimensional(self):
         a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,))
@@ -44,7 +45,7 @@ def test_FitOneDimensional(self):
 class TestSerialization(unittest.TestCase):
 
     def setUp(self):
-        self.nn = MLPR(layers=[("Linear",)], n_iter=1)
+        self.nn = MLPR(layers=[L("Linear")], n_iter=1)
 
     def test_SerializeFail(self):
         buf = io.BytesIO()
@@ -67,7 +68,7 @@ def test_SerializeCorrect(self):
 class TestSerializedNetwork(TestLinearNetwork):
 
     def setUp(self):
-        self.original = MLPR(layers=[("Linear",)])
+        self.original = MLPR(layers=[L("Linear")])
         a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
         self.original._initialize(a_in, a_out)
 
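TestSerialization and TestSerializedNetwork above cover pickling; test_SerializeFail suggests an unfitted network cannot be serialized. A round-trip sketch under that assumption:

    import io
    import pickle
    import numpy

    from sknn.mlp import MultiLayerPerceptronRegressor as MLPR
    from sknn.mlp import Layer as L

    nn = MLPR(layers=[L("Linear")], n_iter=1)
    a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
    nn.fit(a_in, a_out)        # train (or initialize) before serializing

    buf = io.BytesIO()
    pickle.dump(nn, buf)       # write the trained network
    buf.seek(0)
    nn2 = pickle.load(buf)     # restore an equivalent network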
7 changes: 4 additions & 3 deletions sknn/tests/test_output.py
@@ -1,18 +1,19 @@
 import unittest
 from nose.tools import (assert_is_not_none, assert_raises, assert_equal)
 
-from sknn.mlp import MultiLayerPerceptronRegressor as MLPR, MultiLayerPerceptronClassifier as MLPC
+from sknn.mlp import MultiLayerPerceptronRegressor as MLPR
+from sknn.mlp import Layer as L
 
 from . import test_linear
 
 
 class TestGaussianOutput(test_linear.TestLinearNetwork):
 
     def setUp(self):
-        self.nn = MLPR(layers=[("Gaussian",)], n_iter=1)
+        self.nn = MLPR(layers=[L("Gaussian")], n_iter=1)
 
 
 class TestSoftmaxOutput(test_linear.TestLinearNetwork):
 
     def setUp(self):
-        self.nn = MLPR(layers=[("Softmax",)], n_iter=1)
+        self.nn = MLPR(layers=[L("Softmax")], n_iter=1)
5 changes: 3 additions & 2 deletions sknn/tests/test_pipeline.py
@@ -9,6 +9,7 @@
 from sklearn.preprocessing import MinMaxScaler
 
 from sknn.mlp import MultiLayerPerceptronRegressor as MLPR
+from sknn.mlp import Layer as L
 
 
 class TestPipeline(unittest.TestCase):
@@ -20,14 +21,14 @@ def _run(self, pipeline):
 
     def test_NeuralNetworkOnly(self):
         pipeline = Pipeline([
-            ('neural network', MLPR(layers=[("Linear",)], n_iter=1))
+            ('neural network', MLPR(layers=[L("Linear")], n_iter=1))
         ])
        self._run(pipeline)
 
     def test_ScalerThenNeuralNetwork(self):
         pipeline = Pipeline([
             ('min/max scaler', MinMaxScaler()),
-            ('neural network', MLPR(layers=[("Linear",)], n_iter=1))
+            ('neural network', MLPR(layers=[L("Linear")], n_iter=1))
         ])
         self._run(pipeline)
(3 more changed files not shown.)
