From cfca0338b0eabada737cd300924dd2f84de9336c Mon Sep 17 00:00:00 2001
From: Mang Wang
Date: Thu, 19 Nov 2015 16:43:49 +0000
Subject: [PATCH 1/4] change the data type of weights and biases to
 numpy.float32 so these unit tests pass on the GPU with the Theano flag
 floatX=float32

---
 sknn/tests/test_data.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/sknn/tests/test_data.py b/sknn/tests/test_data.py
index 26de705..b94d4dd 100644
--- a/sknn/tests/test_data.py
+++ b/sknn/tests/test_data.py
@@ -57,6 +57,8 @@ def test_SetLayerParamsList(self):
 
         weights = numpy.random.uniform(-1.0, +1.0, (16,4))
         biases = numpy.random.uniform(-1.0, +1.0, (4,))
+        weights = weights.astype(numpy.float32)
+        biases = biases.astype(numpy.float32)
         nn.set_parameters([(weights, biases)])
         p = nn.get_parameters()
 
@@ -70,6 +72,8 @@ def test_LayerParamsSkipOneWithNone(self):
 
         weights = numpy.random.uniform(-1.0, +1.0, (32,4))
         biases = numpy.random.uniform(-1.0, +1.0, (4,))
+        weights = weights.astype(numpy.float32)
+        biases = biases.astype(numpy.float32)
         nn.set_parameters([None, (weights, biases)])
         p = nn.get_parameters()
 
@@ -83,6 +87,8 @@ def test_SetLayerParamsDict(self):
 
         weights = numpy.random.uniform(-1.0, +1.0, (32,4))
         biases = numpy.random.uniform(-1.0, +1.0, (4,))
+        weights = weights.astype(numpy.float32)
+        biases = biases.astype(numpy.float32)
         nn.set_parameters({'abcd': (weights, biases)})
         p = nn.get_parameters()
 

From ebe648afdde4026a488e216b877d732e0a5e1038 Mon Sep 17 00:00:00 2001
From: Mang Wang
Date: Fri, 20 Nov 2015 13:14:47 +0000
Subject: [PATCH 2/4] fix the data type error in mlp.py (and the assert_true
 checks in test_data.py) when the Theano flag is set to floatX=float32

---
 sknn/backend/lasagne/mlp.py |  4 ++++
 sknn/tests/test_data.py     | 18 ++++++------
 2 files changed, 10 insertions(+), 12 deletions(-)

diff --git a/sknn/backend/lasagne/mlp.py b/sknn/backend/lasagne/mlp.py
index 1e1cfbe..e96e4c9 100644
--- a/sknn/backend/lasagne/mlp.py
+++ b/sknn/backend/lasagne/mlp.py
@@ -276,6 +276,10 @@ def _array_to_mlp(self, array, nn):
         for layer, data in zip(nn, array):
             if data is None: continue
             weights, biases = data
+            if weights.dtype == numpy.float64:
+                weights = weights.astype(numpy.float32)
+            if biases.dtype == numpy.float64:
+                biases = biases.astype(numpy.float32)
 
             while not hasattr(layer, 'W') and not hasattr(layer, 'b'):
                 layer = layer.input_layer
diff --git a/sknn/tests/test_data.py b/sknn/tests/test_data.py
index b94d4dd..3edddfc 100644
--- a/sknn/tests/test_data.py
+++ b/sknn/tests/test_data.py
@@ -57,13 +57,11 @@ def test_SetLayerParamsList(self):
 
         weights = numpy.random.uniform(-1.0, +1.0, (16,4))
         biases = numpy.random.uniform(-1.0, +1.0, (4,))
-        weights = weights.astype(numpy.float32)
-        biases = biases.astype(numpy.float32)
         nn.set_parameters([(weights, biases)])
         p = nn.get_parameters()
 
-        assert_true((p[0].weights == weights).all())
-        assert_true((p[0].biases == biases).all())
+        assert_true((p[0].weights == weights.astype(numpy.float32)).all())
+        assert_true((p[0].biases == biases.astype(numpy.float32)).all())
 
     def test_LayerParamsSkipOneWithNone(self):
         nn = MLPR(layers=[L("Sigmoid", units=32), L("Linear", name='abcd')])
@@ -72,13 +70,11 @@ def test_LayerParamsSkipOneWithNone(self):
 
         weights = numpy.random.uniform(-1.0, +1.0, (32,4))
         biases = numpy.random.uniform(-1.0, +1.0, (4,))
-        weights = weights.astype(numpy.float32)
-        biases = biases.astype(numpy.float32)
         nn.set_parameters([None, (weights, biases)])
         p = nn.get_parameters()
 
-        assert_true((p[1].weights == weights).all())
-        assert_true((p[1].biases == biases).all())
+        assert_true((p[1].weights == weights.astype(numpy.float32)).all())
+        assert_true((p[1].biases == biases.astype(numpy.float32)).all())
 
     def test_SetLayerParamsDict(self):
         nn = MLPR(layers=[L("Sigmoid", units=32), L("Linear", name='abcd')])
@@ -87,10 +83,8 @@ def test_SetLayerParamsDict(self):
 
         weights = numpy.random.uniform(-1.0, +1.0, (32,4))
         biases = numpy.random.uniform(-1.0, +1.0, (4,))
-        weights = weights.astype(numpy.float32)
-        biases = biases.astype(numpy.float32)
         nn.set_parameters({'abcd': (weights, biases)})
         p = nn.get_parameters()
 
-        assert_true((p[1].weights == weights).all())
-        assert_true((p[1].biases == biases).all())
+        assert_true((p[1].weights == weights.astype(numpy.float32)).all())
+        assert_true((p[1].biases == biases.astype(numpy.float32)).all())
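Note: the failure these first two patches work around comes from NumPy's default dtype. numpy.random.uniform always returns float64 arrays, while a network compiled with Theano's floatX=float32 stores its parameters as float32, so an exact equality check between the stored parameters and the original arrays fails once the values have been rounded. A minimal sketch of the mismatch, using numpy only (the variable names are illustrative, not from sknn):

    import numpy

    original = numpy.random.uniform(-1.0, +1.0, (16, 4))  # float64 by default
    stored = original.astype(numpy.float32)  # what a float32 backend keeps

    # Comparing across dtypes upcasts the float32 values back to float64,
    # where the rounding shows up, so exact equality almost always fails:
    print((stored == original).all())                        # almost always False
    print((stored == original.astype(numpy.float32)).all())  # True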
From e5a87f99bdeb504e2dc133ff28ecdc2a653b0660 Mon Sep 17 00:00:00 2001
From: Mang Wang
Date: Fri, 20 Nov 2015 14:28:28 +0000
Subject: [PATCH 3/4] change the dtype to theano.config.floatX in both mlp.py
 and test_data.py

---
 sknn/backend/lasagne/mlp.py |  6 ++----
 sknn/tests/test_data.py     | 13 +++++++------
 2 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/sknn/backend/lasagne/mlp.py b/sknn/backend/lasagne/mlp.py
index e96e4c9..4c0c8ab 100644
--- a/sknn/backend/lasagne/mlp.py
+++ b/sknn/backend/lasagne/mlp.py
@@ -276,10 +276,8 @@ def _array_to_mlp(self, array, nn):
         for layer, data in zip(nn, array):
             if data is None: continue
             weights, biases = data
-            if weights.dtype == numpy.float64:
-                weights = weights.astype(numpy.float32)
-            if biases.dtype == numpy.float64:
-                biases = biases.astype(numpy.float32)
+            weights = weights.astype(theano.config.floatX)
+            biases = biases.astype(theano.config.floatX)
 
             while not hasattr(layer, 'W') and not hasattr(layer, 'b'):
                 layer = layer.input_layer
diff --git a/sknn/tests/test_data.py b/sknn/tests/test_data.py
index 3edddfc..f834521 100644
--- a/sknn/tests/test_data.py
+++ b/sknn/tests/test_data.py
@@ -4,6 +4,7 @@
 import logging
 
 import numpy
+import theano
 
 from sknn.mlp import Regressor as MLPR
 from sknn.mlp import Layer as L, Convolution as C
@@ -60,8 +61,8 @@ def test_SetLayerParamsList(self):
         nn.set_parameters([(weights, biases)])
         p = nn.get_parameters()
 
-        assert_true((p[0].weights == weights.astype(numpy.float32)).all())
-        assert_true((p[0].biases == biases.astype(numpy.float32)).all())
+        assert_true((p[0].weights == weights.astype(theano.config.floatX)).all())
+        assert_true((p[0].biases == biases.astype(theano.config.floatX)).all())
 
     def test_LayerParamsSkipOneWithNone(self):
         nn = MLPR(layers=[L("Sigmoid", units=32), L("Linear", name='abcd')])
@@ -73,8 +74,8 @@ def test_LayerParamsSkipOneWithNone(self):
         nn.set_parameters([None, (weights, biases)])
         p = nn.get_parameters()
 
-        assert_true((p[1].weights == weights.astype(numpy.float32)).all())
-        assert_true((p[1].biases == biases.astype(numpy.float32)).all())
+        assert_true((p[1].weights == weights.astype(theano.config.floatX)).all())
+        assert_true((p[1].biases == biases.astype(theano.config.floatX)).all())
 
     def test_SetLayerParamsDict(self):
         nn = MLPR(layers=[L("Sigmoid", units=32), L("Linear", name='abcd')])
@@ -86,5 +87,5 @@ def test_SetLayerParamsDict(self):
         nn.set_parameters({'abcd': (weights, biases)})
         p = nn.get_parameters()
 
-        assert_true((p[1].weights == weights.astype(numpy.float32)).all())
-        assert_true((p[1].biases == biases.astype(numpy.float32)).all())
+        assert_true((p[1].weights == weights.astype(theano.config.floatX)).all())
+        assert_true((p[1].biases == biases.astype(theano.config.floatX)).all())
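Note: hard-coding numpy.float32, as the first two patches do, breaks the default CPU configuration where floatX is float64; theano.config.floatX resolves to whichever dtype the active configuration uses, so casting through it works in both settings. A minimal sketch of that behaviour, assuming Theano is installed (the shared variable w below is illustrative, not sknn's actual layer parameter):

    import numpy
    import theano

    # floatX is a string: 'float64' by default, or 'float32' when running with
    # THEANO_FLAGS=floatX=float32 (the usual setting for GPU jobs).
    print(theano.config.floatX)

    w = theano.shared(numpy.zeros((32, 4), dtype=theano.config.floatX))
    weights = numpy.random.uniform(-1.0, +1.0, (32, 4))  # float64 from numpy

    # set_value() raises a TypeError when the dtype does not match the shared
    # variable, so the cast goes through floatX rather than a hard-coded dtype:
    w.set_value(weights.astype(theano.config.floatX))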
From e46e9a625a10774067a8659d2e6342705fc62303 Mon Sep 17 00:00:00 2001
From: Mang Wang
Date: Fri, 20 Nov 2015 18:35:29 +0000
Subject: [PATCH 4/4] fix the dtype of the arguments passed to assert_true()
 so the assertions hold

---
 sknn/backend/lasagne/mlp.py |  6 ++----
 sknn/tests/test_data.py     | 13 ++++++-------
 2 files changed, 8 insertions(+), 11 deletions(-)

diff --git a/sknn/backend/lasagne/mlp.py b/sknn/backend/lasagne/mlp.py
index 4c0c8ab..3cbee3d 100644
--- a/sknn/backend/lasagne/mlp.py
+++ b/sknn/backend/lasagne/mlp.py
@@ -276,8 +276,6 @@ def _array_to_mlp(self, array, nn):
         for layer, data in zip(nn, array):
             if data is None: continue
             weights, biases = data
-            weights = weights.astype(theano.config.floatX)
-            biases = biases.astype(theano.config.floatX)
 
             while not hasattr(layer, 'W') and not hasattr(layer, 'b'):
                 layer = layer.input_layer
@@ -285,9 +283,9 @@ def _array_to_mlp(self, array, nn):
             ws = tuple(layer.W.shape.eval())
             assert ws == weights.shape, "Layer weights shape mismatch: %r != %r" %\
                 (ws, weights.shape)
-            layer.W.set_value(weights)
+            layer.W.set_value(weights.astype(theano.config.floatX))
 
             bs = tuple(layer.b.shape.eval())
             assert bs == biases.shape, "Layer biases shape mismatch: %r != %r" %\
                 (bs, biases.shape)
-            layer.b.set_value(biases)
+            layer.b.set_value(biases.astype(theano.config.floatX))
diff --git a/sknn/tests/test_data.py b/sknn/tests/test_data.py
index f834521..8f0a6cf 100644
--- a/sknn/tests/test_data.py
+++ b/sknn/tests/test_data.py
@@ -4,7 +4,6 @@
 import logging
 
 import numpy
-import theano
 
 from sknn.mlp import Regressor as MLPR
 from sknn.mlp import Layer as L, Convolution as C
@@ -61,8 +60,8 @@ def test_SetLayerParamsList(self):
         nn.set_parameters([(weights, biases)])
         p = nn.get_parameters()
 
-        assert_true((p[0].weights == weights.astype(theano.config.floatX)).all())
-        assert_true((p[0].biases == biases.astype(theano.config.floatX)).all())
+        assert_true((p[0].weights.astype('float32') == weights.astype('float32')).all())
+        assert_true((p[0].biases.astype('float32') == biases.astype('float32')).all())
 
     def test_LayerParamsSkipOneWithNone(self):
         nn = MLPR(layers=[L("Sigmoid", units=32), L("Linear", name='abcd')])
@@ -74,8 +73,8 @@ def test_LayerParamsSkipOneWithNone(self):
         nn.set_parameters([None, (weights, biases)])
         p = nn.get_parameters()
 
-        assert_true((p[1].weights == weights.astype(theano.config.floatX)).all())
-        assert_true((p[1].biases == biases.astype(theano.config.floatX)).all())
+        assert_true((p[1].weights.astype('float32') == weights.astype('float32')).all())
+        assert_true((p[1].biases.astype('float32') == biases.astype('float32')).all())
 
     def test_SetLayerParamsDict(self):
         nn = MLPR(layers=[L("Sigmoid", units=32), L("Linear", name='abcd')])
@@ -87,5 +86,5 @@ def test_SetLayerParamsDict(self):
         nn.set_parameters({'abcd': (weights, biases)})
         p = nn.get_parameters()
 
-        assert_true((p[1].weights == weights.astype(theano.config.floatX)).all())
-        assert_true((p[1].biases == biases.astype(theano.config.floatX)).all())
+        assert_true((p[1].weights.astype('float32') == weights.astype('float32')).all())
+        assert_true((p[1].biases.astype('float32') == biases.astype('float32')).all())
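Note: the final revision narrows the cast to the point where parameters enter Theano (the layer.W.set_value and layer.b.set_value calls), leaving the caller's arrays untouched, and has the tests normalise both sides to float32 before the exact comparison, so they pass whether floatX is float32 or float64 and test_data.py no longer needs to import theano. A sketch of that test idiom in plain numpy (params_equal is a hypothetical helper, not part of sknn):

    import numpy

    def params_equal(a, b):
        # Casting both sides to the narrower dtype makes the exact comparison
        # deterministic, whichever dtype the backend actually stored.
        return (a.astype('float32') == b.astype('float32')).all()

    weights = numpy.random.uniform(-1.0, +1.0, (32, 4))  # float64, as in the tests
    assert params_equal(weights.astype(numpy.float32), weights)  # float32 backend
    assert params_equal(weights, weights)                        # float64 backend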