diff --git a/.gitignore b/.gitignore index 2e36fa083..2ec50d5e9 100644 --- a/.gitignore +++ b/.gitignore @@ -10,4 +10,5 @@ dist docs/_build tensorlayer.egg-info tensorlayer/__pacache__ -venv/ \ No newline at end of file +venv/ +.pytest_cache/ diff --git a/.travis.yml b/.travis.yml index 70097ede2..f66a4f2ff 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,43 +1,56 @@ # https://docs.travis-ci.com/user/languages/python/ language: python + python: - "2.7" - "3.5" - "3.6" +#env: +# global: +# +# ### == PYPI_PASSWORD === ### +# - secure: "##############################################################" +# +# ### === GITHUB_PERSONAL_TOKEN === ### +# - secure: "##############################################################" + + install: - pip install tensorflow - pip install -r requirements.txt - - pip install -r tests/requirements.txt - - pip install . + - pip install .[test] + script: # units test # https://docs.pytest.org/en/latest/ - # TODO: make it work - # - pytest + - pytest + + +before_deploy: + - python setup.py sdist + - python setup.py bdist_wheel + - python setup.py bdist_wheel --universal + - python setup.py egg_info + + +#deploy: +#- provider: pypi +# user: zsdh123 +# password: "$PYPI_PASSWORD" +# skip_cleanup: true +# on: +# tags: true +# python: '3.6' - # smoke tests - # - make test # TODO: make it use python3 by default - - python tests/test_yapf_format.py - - python tests/test_pydocstyle.py - - python tests/test_mnist_simple.py - - python tests/test_reuse_mlp.py - - python tests/test_layers_basic.py - - python tests/test_layers_convolution.py - - python tests/test_layers_core.py - - python tests/test_layers_extend.py - - python tests/test_layers_flow_control.py - - python tests/test_layers_importer.py - - python tests/test_layers_merge.py - - python tests/test_layers_normalization.py - - python tests/test_layers_padding.py - - python tests/test_layers_pooling.py - - python tests/test_layers_recurrent.py - - python tests/test_layers_shape.py - - python tests/test_layers_spatial_transformer.py - - python tests/test_layers_special_activation.py - - python tests/test_layers_stack.py - - python tests/test_layers_super_resolution.py - - python tests/test_layers_time_distributed.py - - python tests/test_models.py +#- provider: releases +# file: +# - dist/* +# - tensorlayer.egg-info/PKG-INFO +# file_glob: true +# skip_cleanup: true +# api_key: "$GITHUB_PERSONAL_TOKEN" +# on: +# tags: true +# python: '3.6' diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 000000000..ad16166ae --- /dev/null +++ b/pytest.ini @@ -0,0 +1,2 @@ +[pytest] +testpaths = tests/ \ No newline at end of file diff --git a/setup.py b/setup.py index e1ef6b84b..2993ff57a 100755 --- a/setup.py +++ b/setup.py @@ -133,9 +133,6 @@ def req_file(filename): extras_require={ 'test': req_file("tests/requirements.txt") }, - - zip_safe=True, - include_package_data=True, scripts=[ 'tl', ], diff --git a/tests/requirements.txt b/tests/requirements.txt index d907538be..055c17a11 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ -1,3 +1,7 @@ -yapf==0.20.2 -pydocstyle==2.1.1 keras==2.1.5 +pydocstyle==2.1.1 +pytest==3.4.1 +pytest-cache==1.0 +pytest-cov==2.5.1 +pytest-xdist==1.22.2 +yapf==0.20.2 \ No newline at end of file diff --git a/tests/test_layers_basic.py b/tests/test_layers_basic.py index 09f4fd2c9..c42952744 100644 --- a/tests/test_layers_basic.py +++ b/tests/test_layers_basic.py @@ -1,38 +1,55 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import unittest + import tensorflow as tf import 
tensorlayer as tl -x = tf.placeholder(tf.float32, [None, 100]) -n = tl.layers.InputLayer(x, name='in') -n = tl.layers.DenseLayer(n, n_units=80, name='d1') -n = tl.layers.DenseLayer(n, n_units=80, name='d2') -print(n) -n.print_layers() -n.print_params(False) -print(n.count_params()) -if n.count_params() != 14560: - raise Exception("params do not match") +class Layer_Basic_Test(unittest.TestCase): + @classmethod + def setUpClass(cls): + + x = tf.placeholder(tf.float32, [None, 100]) + n = tl.layers.InputLayer(x, name='in') + n = tl.layers.DenseLayer(n, n_units=80, name='d1') + n = tl.layers.DenseLayer(n, n_units=80, name='d2') + + n.print_layers() + n.print_params(False) + + n2 = n[:, :30] + n2.print_layers() + + cls.n_params = n.count_params() + cls.shape_n = n.outputs.get_shape().as_list() + cls.shape_n2 = n2.outputs.get_shape().as_list() + cls.all_layers = n.all_layers + cls.all_params = n.all_params + + @classmethod + def tearDownClass(cls): + tf.reset_default_graph() + + def test_n_params(self): + self.assertEqual(self.n_params, 14560) + + def test_shape_n(self): + self.assertEqual(self.shape_n[-1], 80) -shape = n.outputs.get_shape().as_list() -if shape[-1] != 80: - raise Exception("shape do not match") + def test_all_layers(self): + self.assertEqual(len(self.all_layers), 2) -if len(n.all_layers) != 2: - raise Exception("layers do not match") + def test_all_params(self): + self.assertEqual(len(self.all_params), 4) -if len(n.all_params) != 4: - raise Exception("params do not match") + def test_shape_n2(self): + self.assertEqual(self.shape_n2[-1], 30) -for l in n: - print(l) -n2 = n[:, :30] -print(n2) -n2.print_layers() +if __name__ == '__main__': -shape = n2.outputs.get_shape().as_list() -if shape[-1] != 30: - raise Exception("shape do not match") + # tf.logging.set_verbosity(tf.logging.INFO) + tf.logging.set_verbosity(tf.logging.DEBUG) -for l in n2: - print(l) + unittest.main() diff --git a/tests/test_layers_convolution.py b/tests/test_layers_convolution.py index b5560352d..d408ec101 100644 --- a/tests/test_layers_convolution.py +++ b/tests/test_layers_convolution.py @@ -1,129 +1,167 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import unittest + import tensorflow as tf import tensorlayer as tl -## 1D -x = tf.placeholder(tf.float32, (None, 100, 1)) -nin = tl.layers.InputLayer(x, name='in1') - -n = tl.layers.Conv1dLayer(nin, shape=(5, 1, 32), stride=2) -shape = n.outputs.get_shape().as_list() -if (shape[1] != 50) or (shape[2] != 32): - raise Exception("shape do not match") - -n = tl.layers.Conv1d(nin, n_filter=32, filter_size=5, stride=2) -print(n) -shape = n.outputs.get_shape().as_list() -if (shape[1] != 50) or (shape[2] != 32): - raise Exception("shape do not match") - -# AtrousConv1dLayer - -## 2D -x = tf.placeholder(tf.float32, (None, 100, 100, 3)) -nin = tl.layers.InputLayer(x, name='in2') -n = tl.layers.Conv2dLayer( - nin, - act=tf.nn.relu, - shape=(5, 5, 3, 32), - strides=(1, 2, 2, 1), - padding='SAME', - W_init=tf.truncated_normal_initializer(stddev=5e-2), - b_init=tf.constant_initializer(value=0.0), - name='conv2dlayer') -print(n) -shape = n.outputs.get_shape().as_list() -if (shape[1] != 50) or (shape[2] != 50) or (shape[3] != 32): - raise Exception("shape do not match") - -n = tl.layers.Conv2d(nin, n_filter=32, filter_size=(3, 3), strides=(2, 2), act=None, name='conv2d') -shape = n.outputs.get_shape().as_list() -if (shape[1] != 50) or (shape[2] != 50) or (shape[3] != 32): - raise Exception("shape do not match") -n.print_params(False) -if len(n.all_params) != 2: - raise 
Exception("params do not match") - -n = tl.layers.Conv2d(nin, n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, b_init=None, name='conv2d_no_bias') -print(n) -shape = n.outputs.get_shape().as_list() -if (shape[1] != 50) or (shape[2] != 50) or (shape[3] != 32): - raise Exception("shape do not match") -if len(n.all_params) != 1: - raise Exception("params do not match") - -n = tl.layers.DeConv2dLayer(nin, shape=(5, 5, 32, 3), output_shape=(100, 200, 200, 32), strides=(1, 2, 2, 1), name='deconv2dlayer') -print(n) -shape = n.outputs.get_shape().as_list() -if (shape[1] != 200) or (shape[2] != 200) or (shape[3] != 32): - raise Exception("shape do not match") - -print(nin.outputs) -n = tl.layers.DeConv2d(nin, n_filter=32, filter_size=(3, 3), strides=(2, 2), name='DeConv2d') -print(n) -shape = n.outputs.get_shape().as_list() -# if (shape[1] != 200) or (shape[2] != 200) or (shape[3] != 32): # TODO: why [None None None 32] ? -if (shape[3] != 32): - raise Exception("shape do not match") - -n = tl.layers.DepthwiseConv2d(nin, shape=(3, 3), strides=(2, 2), act=tf.nn.relu, depth_multiplier=2, name='depthwise') -print(n) -shape = n.outputs.get_shape().as_list() -if (shape[1] != 50) or (shape[2] != 50) or (shape[3] != 6): - raise Exception("shape do not match") - -n = tl.layers.Conv2d(nin, n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, name='conv2d2') -n = tl.layers.GroupConv2d(n, n_filter=32, filter_size=(3, 3), strides=(2, 2), name='group') -print(n) -shape = n.outputs.get_shape().as_list() -if (shape[1] != 25) or (shape[2] != 25) or (shape[3] != 32): - raise Exception("shape do not match") - -# n = UpSampling2dLayer -# n = DownSampling2dLayer - -# offset1 = tl.layers.Conv2d(nin, 18, (3, 3), (1, 1), padding='SAME', name='offset1') -# net = tl.layers.DeformableConv2d(nin, offset1, 32, (3, 3), name='deformable1') -# offset2 = tl.layers.Conv2d(net, 18, (3, 3), (1, 1), padding='SAME', name='offset2') -# net = tl.layers.DeformableConv2d(net, offset2, 64, (3, 3), name='deformable2') -# net.print_layers() -# net.print_params(False) - -# AtrousConv2dLayer - -n = tl.layers.SeparableConv2d(nin, n_filter=32, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, name='seperable1') -n.print_layers() -n.print_params(False) - -shape = n.outputs.get_shape().as_list() -if shape[1:] != [98, 98, 32]: - raise Exception("shape do not match") - -if len(n.all_layers) != 1: - raise Exception("layers do not match") - -if len(n.all_params) != 3: - raise Exception("params do not match") - -if n.count_params() != 155: - raise Exception("params do not match") -# exit() - -## 3D -x = tf.placeholder(tf.float32, (None, 100, 100, 100, 3)) -nin = tl.layers.InputLayer(x, name='in3') - -n = tl.layers.Conv3dLayer(nin, shape=(2, 2, 2, 3, 32), strides=(1, 2, 2, 2, 1)) -print(n) -shape = n.outputs.get_shape().as_list() -if (shape[1] != 50) or (shape[2] != 50) or (shape[3] != 50) or (shape[4] != 32): - raise Exception("shape do not match") - -# n = tl.layers.DeConv3dLayer(nin, shape=(2, 2, 2, 128, 3), output_shape=(100, 12, 32, 32, 128), strides=(1, 2, 2, 2, 1)) -# print(n) -# shape = n.outputs.get_shape().as_list() - -n = tl.layers.DeConv3d(nin, n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2)) -shape = n.outputs.get_shape().as_list() -print(shape) -if (shape[1] != 200) or (shape[2] != 200) or (shape[3] != 200) or (shape[4] != 32): - raise Exception("shape do not match") + +class Layer_Convolution_Test(unittest.TestCase): + @classmethod + def setUpClass(cls): + ############ + # 1D # + ############ + + x1 = 
tf.placeholder(tf.float32, (None, 100, 1)) + nin1 = tl.layers.InputLayer(x1, name='in1') + + n1 = tl.layers.Conv1dLayer(nin1, shape=(5, 1, 32), stride=2) + cls.shape_n1 = n1.outputs.get_shape().as_list() + + n2 = tl.layers.Conv1d(nin1, n_filter=32, filter_size=5, stride=2) + cls.shape_n2 = n2.outputs.get_shape().as_list() + + ############ + # 2D # + ############ + + x2 = tf.placeholder(tf.float32, (None, 100, 100, 3)) + nin2 = tl.layers.InputLayer(x2, name='in2') + + n3 = tl.layers.Conv2dLayer( + nin2, + act=tf.nn.relu, + shape=(5, 5, 3, 32), + strides=(1, 2, 2, 1), + padding='SAME', + W_init=tf.truncated_normal_initializer(stddev=5e-2), + b_init=tf.constant_initializer(value=0.0), + name='conv2dlayer') + cls.shape_n3 = n3.outputs.get_shape().as_list() + + n4 = tl.layers.Conv2d(nin2, n_filter=32, filter_size=(3, 3), strides=(2, 2), act=None, name='conv2d') + cls.shape_n4 = n4.outputs.get_shape().as_list() + cls.n4_params = n4.all_params + + n5 = tl.layers.Conv2d(nin2, n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, b_init=None, name='conv2d_no_bias') + cls.shape_n5 = n5.outputs.get_shape().as_list() + cls.n5_params = n5.all_params + + n6 = tl.layers.DeConv2dLayer(nin2, shape=(5, 5, 32, 3), output_shape=(100, 200, 200, 32), strides=(1, 2, 2, 1), name='deconv2dlayer') + cls.shape_n6 = n6.outputs.get_shape().as_list() + + n7 = tl.layers.DeConv2d(nin2, n_filter=32, filter_size=(3, 3), strides=(2, 2), name='DeConv2d') + cls.shape_n7 = n7.outputs.get_shape().as_list() + + n8 = tl.layers.DepthwiseConv2d(nin2, shape=(3, 3), strides=(2, 2), act=tf.nn.relu, depth_multiplier=2, name='depthwise') + cls.shape_n8 = n8.outputs.get_shape().as_list() + + n9 = tl.layers.Conv2d(nin2, n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, name='conv2d2') + n9 = tl.layers.GroupConv2d(n9, n_filter=32, filter_size=(3, 3), strides=(2, 2), name='group') + cls.shape_n9 = n9.outputs.get_shape().as_list() + + n10 = tl.layers.SeparableConv2d(nin2, n_filter=32, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, name='seperable1') + cls.shape_n10 = n10.outputs.get_shape().as_list() + cls.n10_all_layers = n10.all_layers + cls.n10_params = n10.all_params + cls.n10_count_params = n10.count_params() + + ############ + # 3D # + ############ + + x3 = tf.placeholder(tf.float32, (None, 100, 100, 100, 3)) + nin3 = tl.layers.InputLayer(x3, name='in3') + + n11 = tl.layers.Conv3dLayer(nin3, shape=(2, 2, 2, 3, 32), strides=(1, 2, 2, 2, 1)) + cls.shape_n11 = n11.outputs.get_shape().as_list() + + # n = tl.layers.DeConv3dLayer(nin, shape=(2, 2, 2, 128, 3), output_shape=(100, 12, 32, 32, 128), strides=(1, 2, 2, 2, 1)) + # print(n) + # shape = n.outputs.get_shape().as_list() + + n12 = tl.layers.DeConv3d(nin3, n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2)) + cls.shape_n12 = n12.outputs.get_shape().as_list() + + @classmethod + def tearDownClass(cls): + tf.reset_default_graph() + + def test_shape_n1(self): + self.assertEqual(self.shape_n1[1], 50) + self.assertEqual(self.shape_n1[2], 32) + + def test_shape_n2(self): + self.assertEqual(self.shape_n2[1], 50) + self.assertEqual(self.shape_n2[2], 32) + + def test_shape_n3(self): + self.assertEqual(self.shape_n3[1], 50) + self.assertEqual(self.shape_n3[2], 50) + self.assertEqual(self.shape_n3[3], 32) + + def test_shape_n4(self): + self.assertEqual(self.shape_n4[1], 50) + self.assertEqual(self.shape_n4[2], 50) + self.assertEqual(self.shape_n4[3], 32) + + def test_shape_n5(self): + self.assertEqual(self.shape_n5[1], 50) + self.assertEqual(self.shape_n5[2], 50) + 
self.assertEqual(self.shape_n5[3], 32) + + def test_shape_n6(self): + self.assertEqual(self.shape_n6[1], 200) + self.assertEqual(self.shape_n6[2], 200) + self.assertEqual(self.shape_n6[3], 32) + + def test_shape_n7(self): + #self.assertEqual(self.shape_n7[1], 200) # TODO: why [None None None 32] ? + #self.assertEqual(self.shape_n7[2], 200) # TODO: why [None None None 32] ? + self.assertEqual(self.shape_n7[3], 32) + + def test_shape_n8(self): + self.assertEqual(self.shape_n8[1], 50) + self.assertEqual(self.shape_n8[2], 50) + self.assertEqual(self.shape_n8[3], 6) + + def test_shape_n9(self): + self.assertEqual(self.shape_n9[1], 25) + self.assertEqual(self.shape_n9[2], 25) + self.assertEqual(self.shape_n9[3], 32) + + def test_shape_n10(self): + self.assertEqual(self.shape_n10[1:], [98, 98, 32]) + + def test_shape_n11(self): + self.assertEqual(self.shape_n11[1], 50) + self.assertEqual(self.shape_n11[2], 50) + self.assertEqual(self.shape_n11[3], 50) + self.assertEqual(self.shape_n11[4], 32) + + def test_shape_n12(self): + self.assertEqual(self.shape_n12[1], 200) + self.assertEqual(self.shape_n12[2], 200) + self.assertEqual(self.shape_n12[3], 200) + self.assertEqual(self.shape_n12[4], 32) + + def test_params_n4(self): + self.assertEqual(len(self.n4_params), 2) + + def test_params_n5(self): + self.assertEqual(len(self.n5_params), 1) + + def test_params_n10(self): + self.assertEqual(len(self.n10_params), 3) + self.assertEqual(self.n10_count_params, 155) + + def test_layers_n10(self): + self.assertEqual(len(self.n10_all_layers), 1) + + +if __name__ == '__main__': + # tf.logging.set_verbosity(tf.logging.INFO) + tf.logging.set_verbosity(tf.logging.DEBUG) + + unittest.main() diff --git a/tests/test_layers_core.py b/tests/test_layers_core.py index 53f0d8a65..c83d14b4f 100644 --- a/tests/test_layers_core.py +++ b/tests/test_layers_core.py @@ -1,182 +1,192 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import unittest + import tensorflow as tf import tensorlayer as tl -## DenseLayer -x = tf.placeholder(tf.float32, shape=[None, 30]) -net = tl.layers.InputLayer(x, name='input') -net = tl.layers.DenseLayer(net, n_units=10, name='dense') - -net.print_layers() -net.print_params(False) - -shape = net.outputs.get_shape().as_list() -if shape[-1] != 10: - raise Exception("shape do not match") - -if len(net.all_layers) != 1: - raise Exception("layers do not match") - -if len(net.all_params) != 2: - raise Exception("params do not match") - -if net.count_params() != 310: - raise Exception("params do not match") - -## OneHotInputLayer -x = tf.placeholder(tf.int32, shape=[None]) -net = tl.layers.OneHotInputLayer(x, depth=8, name='onehot') -print(net) - -net.print_layers() -net.print_params(False) - -shape = net.outputs.get_shape().as_list() -if shape[-1] != 8: - raise Exception("shape do not match") - -if len(net.all_layers) != 0: - raise Exception("layers do not match") -if len(net.all_params) != 0: - raise Exception("params do not match") +class Layer_Core_Test(unittest.TestCase): + @classmethod + def setUpClass(cls): -if net.count_params() != 0: - raise Exception("params do not match") + cls.batch_size = 8 -## Word2vecEmbeddingInputlayer -batch_size = 8 -train_inputs = tf.placeholder(tf.int32, shape=(batch_size)) -train_labels = tf.placeholder(tf.int32, shape=(batch_size, 1)) -net = tl.layers.Word2vecEmbeddingInputlayer( - inputs=train_inputs, train_labels=train_labels, vocabulary_size=1000, embedding_size=200, num_sampled=64, name='word2vec') -cost = net.nce_cost -train_params = net.all_params + # 
============== DenseLayer ============== -net.print_layers() -net.print_params(False) + x1 = tf.placeholder(tf.float32, shape=[None, 30]) + net1 = tl.layers.InputLayer(x1, name='input') + net1 = tl.layers.DenseLayer(net1, n_units=10, name='dense') -shape = net.outputs.get_shape().as_list() -if shape != [8, 200]: - raise Exception("shape do not match") + net1.print_layers() + net1.print_params(False) -if len(net.all_layers) != 1: - raise Exception("layers do not match") + cls.net1_shape = net1.outputs.get_shape().as_list() + cls.net1_layers = net1.all_layers + cls.net1_params = net1.all_params + cls.net1_n_params = net1.count_params() -if len(net.all_params) != 3: - raise Exception("params do not match") + # ============== OneHotInputLayer ============== -if net.count_params() != 401000: - raise Exception("params do not match") + x2 = tf.placeholder(tf.int32, shape=[None]) + net2 = tl.layers.OneHotInputLayer(x2, depth=8, name='onehot') -## EmbeddingInputlayer -batch_size = 8 -x = tf.placeholder(tf.int32, shape=(batch_size, )) -net = tl.layers.EmbeddingInputlayer(inputs=x, vocabulary_size=1000, embedding_size=50, name='embed') + net2.print_layers() + net2.print_params(False) -net.print_layers() -net.print_params(False) + cls.net2_shape = net2.outputs.get_shape().as_list() + cls.net2_layers = net2.all_layers + cls.net2_params = net2.all_params + cls.net2_n_params = net2.count_params() -shape = net.outputs.get_shape().as_list() -if shape != [batch_size, 50]: # (8, 50) - raise Exception("shape do not match") + # ============== Word2vecEmbeddingInputlayer ============== -if len(net.all_layers) != 1: - raise Exception("layers do not match") + train_inputs = tf.placeholder(tf.int32, shape=cls.batch_size) + train_labels = tf.placeholder(tf.int32, shape=(cls.batch_size, 1)) + net3 = tl.layers.Word2vecEmbeddingInputlayer( + inputs=train_inputs, train_labels=train_labels, vocabulary_size=1000, embedding_size=200, num_sampled=64, name='word2vec') -if len(net.all_params) != 1: - raise Exception("params do not match") + net3.print_layers() + net3.print_params(False) -if net.count_params() != 50000: - raise Exception("params do not match") + cls.net3_shape = net3.outputs.get_shape().as_list() + cls.net3_layers = net3.all_layers + cls.net3_params = net3.all_params + cls.net3_n_params = net3.count_params() -## AverageEmbeddingInputlayer -batch_size = 8 -length = 5 -x = tf.placeholder(tf.int32, shape=(batch_size, length)) -net = tl.layers.AverageEmbeddingInputlayer(inputs=x, vocabulary_size=1000, embedding_size=50, name='avg') + # ============== EmbeddingInputlayer ============== -net.print_layers() -net.print_params(False) + x4 = tf.placeholder(tf.int32, shape=(cls.batch_size, )) + net4 = tl.layers.EmbeddingInputlayer(inputs=x4, vocabulary_size=1000, embedding_size=50, name='embed') -shape = net.outputs.get_shape().as_list() -if shape != [batch_size, 50]: # (8, 50) - raise Exception("shape do not match") + net4.print_layers() + net4.print_params(False) -if len(net.all_layers) != 1: - raise Exception("layers do not match") + cls.net4_shape = net4.outputs.get_shape().as_list() + cls.net4_layers = net4.all_layers + cls.net4_params = net4.all_params + cls.net4_n_params = net4.count_params() -if len(net.all_params) != 1: - raise Exception("params do not match") + # ============== AverageEmbeddingInputlayer ============== -if net.count_params() != 50000: - raise Exception("params do not match") + length = 5 + x5 = tf.placeholder(tf.int32, shape=(cls.batch_size, length)) + net5 = 
tl.layers.AverageEmbeddingInputlayer(inputs=x5, vocabulary_size=1000, embedding_size=50, name='avg') -## ReconLayer -x = tf.placeholder(tf.float32, shape=(None, 784)) -net = tl.layers.InputLayer(x, name='input') -net = tl.layers.DenseLayer(net, n_units=196, act=tf.nn.sigmoid, name='dense2') -net = tl.layers.ReconLayer(net, x_recon=x, n_units=784, act=tf.nn.sigmoid, name='recon') -# sess = tf.InteractiveSession() -# tl.layers.initialize_global_variables(sess) -# X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) -# net.pretrain(sess, x=x, X_train=X_train, X_val=X_val, denoise_name=None, n_epoch=1, batch_size=128, print_freq=1, save=True, save_name='w1pre_') + net5.print_layers() + net5.print_params(False) -net.print_layers() -net.print_params(False) + cls.net5_shape = net5.outputs.get_shape().as_list() + cls.net5_layers = net5.all_layers + cls.net5_params = net5.all_params + cls.net5_n_params = net5.count_params() -shape = net.outputs.get_shape().as_list() -if shape[-1] != 784: - raise Exception("shape do not match") + # ============== ReconLayer ============== -if len(net.all_layers) != 2: - raise Exception("layers do not match") + x6 = tf.placeholder(tf.float32, shape=(None, 784)) + net6 = tl.layers.InputLayer(x6, name='input') + net6 = tl.layers.DenseLayer(net6, n_units=196, act=tf.nn.sigmoid, name='dense2') + net6 = tl.layers.ReconLayer(net6, x_recon=x6, n_units=784, act=tf.nn.sigmoid, name='recon') -if len(net.all_params) != 4: - raise Exception("params do not match") + # sess = tf.InteractiveSession() + # tl.layers.initialize_global_variables(sess) + # X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) + # net.pretrain(sess, x=x, X_train=X_train, X_val=X_val, denoise_name=None, n_epoch=1, batch_size=128, print_freq=1, save=True, save_name='w1pre_') -if net.count_params() != 308308: - raise Exception("params do not match") + net6.print_layers() + net6.print_params(False) -## GaussianNoiseLayer -x = tf.placeholder(tf.float32, shape=(64, 784)) -net = tl.layers.InputLayer(x, name='input') -net = tl.layers.DenseLayer(net, n_units=100, act=tf.nn.relu, name='dense3') -net = tl.layers.GaussianNoiseLayer(net, name='gaussian') + cls.net6_shape = net6.outputs.get_shape().as_list() + cls.net6_layers = net6.all_layers + cls.net6_params = net6.all_params + cls.net6_n_params = net6.count_params() -net.print_layers() -net.print_params(False) + # ============== GaussianNoiseLayer ============== -shape = net.outputs.get_shape().as_list() -if shape != [64, 100]: - raise Exception("shape do not match") + x7 = tf.placeholder(tf.float32, shape=(64, 784)) + net7 = tl.layers.InputLayer(x7, name='input') + net7 = tl.layers.DenseLayer(net7, n_units=100, act=tf.nn.relu, name='dense3') + net7 = tl.layers.GaussianNoiseLayer(net7, name='gaussian') -if len(net.all_layers) != 2: - raise Exception("layers do not match") + net7.print_layers() + net7.print_params(False) -if len(net.all_params) != 2: - raise Exception("params do not match") + cls.net7_shape = net7.outputs.get_shape().as_list() + cls.net7_layers = net7.all_layers + cls.net7_params = net7.all_params + cls.net7_n_params = net7.count_params() -if net.count_params() != 78500: - raise Exception("params do not match") + # ============== DropconnectDenseLayer ============== -## DropconnectDenseLayer -x = tf.placeholder(tf.float32, shape=(64, 784)) -net = tl.layers.InputLayer(x, name='input') -net = tl.layers.DenseLayer(net, n_units=100, act=tf.nn.relu, name='dense4') -net = 
tl.layers.DropconnectDenseLayer(net, keep=0.8, name='dropconnect') + x8 = tf.placeholder(tf.float32, shape=(64, 784)) + net8 = tl.layers.InputLayer(x8, name='input') + net8 = tl.layers.DenseLayer(net8, n_units=100, act=tf.nn.relu, name='dense4') + net8 = tl.layers.DropconnectDenseLayer(net8, keep=0.8, name='dropconnect') -net.print_layers() -net.print_params(False) + net8.print_layers() + net8.print_params(False) -shape = net.outputs.get_shape().as_list() -if shape != [64, 100]: - raise Exception("shape do not match") + cls.net8_shape = net8.outputs.get_shape().as_list() + cls.net8_layers = net8.all_layers + cls.net8_params = net8.all_params + cls.net8_n_params = net8.count_params() -if len(net.all_layers) != 2: - raise Exception("layers do not match") + @classmethod + def tearDownClass(cls): + tf.reset_default_graph() + + def test_net1(self): + self.assertEqual(self.net1_shape[-1], 10) + self.assertEqual(len(self.net1_layers), 1) + self.assertEqual(len(self.net1_params), 2) + self.assertEqual(self.net1_n_params, 310) -if len(net.all_params) != 4: - raise Exception("params do not match") + def test_net2(self): + self.assertEqual(self.net2_shape[-1], 8) + self.assertEqual(len(self.net2_layers), 0) + self.assertEqual(len(self.net2_params), 0) + self.assertEqual(self.net2_n_params, 0) + + def test_net3(self): + self.assertEqual(self.net3_shape, [self.batch_size, 200]) + self.assertEqual(len(self.net3_layers), 1) + self.assertEqual(len(self.net3_params), 3) + self.assertEqual(self.net3_n_params, 401000) -if net.count_params() != 88600: - raise Exception("params do not match") + def test_net4(self): + self.assertEqual(self.net4_shape, [self.batch_size, 50]) + self.assertEqual(len(self.net4_layers), 1) + self.assertEqual(len(self.net4_params), 1) + self.assertEqual(self.net4_n_params, 50000) + + def test_net5(self): + self.assertEqual(self.net5_shape, [self.batch_size, 50]) + self.assertEqual(len(self.net5_layers), 1) + self.assertEqual(len(self.net5_params), 1) + self.assertEqual(self.net5_n_params, 50000) + + def test_net6(self): + self.assertEqual(self.net6_shape[-1], 784) + self.assertEqual(len(self.net6_layers), 2) + self.assertEqual(len(self.net6_params), 4) + self.assertEqual(self.net6_n_params, 308308) + + def test_net7(self): + self.assertEqual(self.net7_shape, [64, 100]) + self.assertEqual(len(self.net7_layers), 2) + self.assertEqual(len(self.net7_params), 2) + self.assertEqual(self.net7_n_params, 78500) + + def test_net8(self): + self.assertEqual(self.net8_shape, [64, 100]) + self.assertEqual(len(self.net8_layers), 2) + self.assertEqual(len(self.net8_params), 4) + self.assertEqual(self.net8_n_params, 88600) + + +if __name__ == '__main__': + + # tf.logging.set_verbosity(tf.logging.INFO) + tf.logging.set_verbosity(tf.logging.DEBUG) + + unittest.main() diff --git a/tests/test_layers_extend.py b/tests/test_layers_extend.py index 56f512e37..240612f87 100644 --- a/tests/test_layers_extend.py +++ b/tests/test_layers_extend.py @@ -1,28 +1,54 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import unittest + import tensorflow as tf import tensorlayer as tl -## 1D -x = tf.placeholder(tf.float32, (None, 100)) -n = tl.layers.InputLayer(x, name='in') -n = tl.layers.DenseLayer(n, n_units=100, name='d1') -n = tl.layers.DenseLayer(n, n_units=100, name='d2') - -n = tl.layers.ExpandDimsLayer(n, axis=2) -print(n) -shape = n.outputs.get_shape().as_list() -if shape[-1] != 1: - raise Exception("shape do not match") - -n = tl.layers.TileLayer(n, multiples=[-1, 1, 3]) -print(n) -shape = 
n.outputs.get_shape().as_list() -if shape[-1] != 3: - raise Exception("shape do not match") - -n.print_layers() -n.print_params(False) -# print(n.all_layers, n.all_params) -if len(n.all_layers) != 4: - raise Exception("layers do not match") -if len(n.all_params) != 4: - raise Exception("params do not match") + +class Layer_Extend_Test(unittest.TestCase): + @classmethod + def setUpClass(cls): + + x = tf.placeholder(tf.float32, (None, 100)) + n = tl.layers.InputLayer(x, name='in') + n = tl.layers.DenseLayer(n, n_units=100, name='d1') + n = tl.layers.DenseLayer(n, n_units=100, name='d2') + + ## 1D + + n = tl.layers.ExpandDimsLayer(n, axis=2) + cls.shape_1 = n.outputs.get_shape().as_list() + + n = tl.layers.TileLayer(n, multiples=[-1, 1, 3]) + cls.shape_2 = n.outputs.get_shape().as_list() + + n.print_layers() + n.print_params(False) + + cls.layers = n.all_layers + cls.params = n.all_params + + @classmethod + def tearDownClass(cls): + tf.reset_default_graph() + + def test_shape_1(self): + self.assertEqual(self.shape_1[-1], 1) + + def test_shape_2(self): + self.assertEqual(self.shape_2[-1], 3) + + def test_layers(self): + self.assertEqual(len(self.layers), 4) + + def test_params(self): + self.assertEqual(len(self.params), 4) + + +if __name__ == '__main__': + + # tf.logging.set_verbosity(tf.logging.INFO) + tf.logging.set_verbosity(tf.logging.DEBUG) + + unittest.main() diff --git a/tests/test_layers_flow_control.py b/tests/test_layers_flow_control.py index 41e5c2a81..9ff900b1d 100644 --- a/tests/test_layers_flow_control.py +++ b/tests/test_layers_flow_control.py @@ -1,36 +1,68 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import unittest + import tensorflow as tf import tensorlayer as tl -x = tf.placeholder(tf.float32, shape=(None, 784), name='x') - -# define the network -net_in = tl.layers.InputLayer(x, name='in') -net_in = tl.layers.DropoutLayer(net_in, keep=0.8, name='in/drop') -# net 0 -net_0 = tl.layers.DenseLayer(net_in, n_units=800, act=tf.nn.relu, name='net0/relu1') -net_0 = tl.layers.DropoutLayer(net_0, keep=0.5, name='net0/drop1') -net_0 = tl.layers.DenseLayer(net_0, n_units=800, act=tf.nn.relu, name='net0/relu2') -# net 1 -net_1 = tl.layers.DenseLayer(net_in, n_units=800, act=tf.nn.relu, name='net1/relu1') -net_1 = tl.layers.DropoutLayer(net_1, keep=0.8, name='net1/drop1') -net_1 = tl.layers.DenseLayer(net_1, n_units=800, act=tf.nn.relu, name='net1/relu2') -net_1 = tl.layers.DropoutLayer(net_1, keep=0.8, name='net1/drop2') -net_1 = tl.layers.DenseLayer(net_1, n_units=800, act=tf.nn.relu, name='net1/relu3') -# multiplexer -net_mux = tl.layers.MultiplexerLayer(layers=[net_0, net_1], name='mux') -network = tl.layers.ReshapeLayer(net_mux, shape=(-1, 800), name='reshape') -network = tl.layers.DropoutLayer(network, keep=0.5, name='drop3') -# output layer -network = tl.layers.DenseLayer(network, n_units=10, act=tf.identity, name='output') - -network.print_layers() -network.print_params(False) - -if len(network.all_params) != 12: - raise Exception("params do not match") - -if len(network.all_layers) != 13: - raise Exception("layers do not match") - -if len(network.all_drop) != 5: - raise Exception("drop do not match") + +class Layer_Flow_Control_Test(unittest.TestCase): + @classmethod + def setUpClass(cls): + x = tf.placeholder(tf.float32, shape=(None, 784), name='x') + + # define the network + net_in = tl.layers.InputLayer(x, name='in') + net_in = tl.layers.DropoutLayer(net_in, keep=0.8, name='in/drop') + # net 0 + net_0 = tl.layers.DenseLayer(net_in, n_units=800, act=tf.nn.relu, 
name='net0/relu1') + net_0 = tl.layers.DropoutLayer(net_0, keep=0.5, name='net0/drop1') + net_0 = tl.layers.DenseLayer(net_0, n_units=800, act=tf.nn.relu, name='net0/relu2') + # net 1 + net_1 = tl.layers.DenseLayer(net_in, n_units=800, act=tf.nn.relu, name='net1/relu1') + net_1 = tl.layers.DropoutLayer(net_1, keep=0.8, name='net1/drop1') + net_1 = tl.layers.DenseLayer(net_1, n_units=800, act=tf.nn.relu, name='net1/relu2') + net_1 = tl.layers.DropoutLayer(net_1, keep=0.8, name='net1/drop2') + net_1 = tl.layers.DenseLayer(net_1, n_units=800, act=tf.nn.relu, name='net1/relu3') + # multiplexer + net_mux = tl.layers.MultiplexerLayer(layers=[net_0, net_1], name='mux') + network = tl.layers.ReshapeLayer(net_mux, shape=(-1, 800), name='reshape') + network = tl.layers.DropoutLayer(network, keep=0.5, name='drop3') + # output layer + network = tl.layers.DenseLayer(network, n_units=10, act=tf.identity, name='output') + + network.print_layers() + network.print_params(False) + + cls.net_shape = network.outputs.get_shape().as_list() + cls.net_layers = network.all_layers + cls.net_params = network.all_params + cls.net_all_drop = network.all_drop + cls.net_n_params = network.count_params() + + @classmethod + def tearDownClass(cls): + tf.reset_default_graph() + + def test_net_shape(self): + self.assertEqual(self.net_shape[-1], 10) + + def test_net_layers(self): + self.assertEqual(len(self.net_layers), 13) + + def test_net_params(self): + self.assertEqual(len(self.net_params), 12) + + def test_net_all_drop(self): + self.assertEqual(len(self.net_all_drop), 5) + + def test_net_n_params(self): + self.assertEqual(self.net_n_params, 3186410) + + +if __name__ == '__main__': + + # tf.logging.set_verbosity(tf.logging.INFO) + tf.logging.set_verbosity(tf.logging.DEBUG) + + unittest.main() diff --git a/tests/test_layers_importer.py b/tests/test_layers_importer.py index 44330a186..985a3211b 100644 --- a/tests/test_layers_importer.py +++ b/tests/test_layers_importer.py @@ -1,53 +1,87 @@ #! /usr/bin/python # -*- coding: utf-8 -*- +import unittest + +try: + from tests.unittests_helper import CustomTestCase +except ImportError: + from unittests_helper import CustomTestCase -import tensorlayer as tl import tensorflow as tf -from keras.layers import * -from tensorlayer.layers import * +import tensorlayer as tl + +import keras as k + +from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3 +from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_arg_scope + slim = tf.contrib.slim -from tensorflow.contrib.slim.python.slim.nets.inception_v3 import (inception_v3, inception_v3_arg_scope) - -sess = tf.InteractiveSession() - -# LambdaLayer -x = tf.placeholder(tf.float32, shape=[None, 784]) - - -def keras_block(x): - x = Dropout(0.8)(x) - x = Dense(100, activation='relu')(x) - # x = Dropout(0.8)(x) - # x = Dense(100, activation='relu')(x) - x = Dropout(0.5)(x) - logits = Dense(10, activation='linear')(x) - return logits - - -network = InputLayer(x, name='input') -network = LambdaLayer(network, fn=keras_block, name='keras') - -# SlimNetsLayer -x = tf.placeholder(tf.float32, shape=[None, 299, 299, 3]) -net_in = tl.layers.InputLayer(x, name='input_layer') -with slim.arg_scope(inception_v3_arg_scope()): - - # Alternatively, you should implement inception_v3 without TensorLayer as follow. 
-    # logits, end_points = inception_v3(X, num_classes=1001,
-    #                                    is_training=False)
-    network = tl.layers.SlimNetsLayer(
-        net_in,
-        slim_layer=inception_v3,
-        slim_args={
-            'num_classes': 1001,
-            'is_training': False,
-            # 'dropout_keep_prob' : 0.8, # for training
-            # 'min_depth' : 16,
-            # 'depth_multiplier' : 1.0,
-            # 'prediction_fn' : slim.softmax,
-            # 'spatial_squeeze' : True,
-            # 'reuse' : None,
-            # 'scope' : 'InceptionV3'
-        },
-        name='InceptionV3'  # <-- the name should be the same with the ckpt model
-    )
+
+
+class Layer_Importer_Test(CustomTestCase):
+    @classmethod
+    def setUpClass(cls):
+
+        cls.net_in = dict()
+
+        # ============================= #
+        #          LambdaLayer          #
+        # ============================= #
+        x = tf.placeholder(tf.float32, shape=[None, 784])
+        cls.net_in["lambda"] = tl.layers.InputLayer(x, name='input')
+
+        # ============================= #
+        #         SlimNetsLayer         #
+        # ============================= #
+        x = tf.placeholder(tf.float32, shape=[None, 299, 299, 3])
+        cls.net_in["slim"] = tl.layers.InputLayer(x, name='input_layer')
+
+    @classmethod
+    def tearDownClass(cls):
+        tf.reset_default_graph()
+
+    def test_lambda_layer(self):
+        def keras_block(x):
+            x = k.layers.Dropout(0.8)(x)
+            x = k.layers.Dense(100, activation='relu')(x)
+            # x = k.layers.Dropout(0.8)(x)
+            # x = k.layers.Dense(100, activation='relu')(x)
+            x = k.layers.Dropout(0.5)(x)
+            logits = k.layers.Dense(10, activation='linear')(x)
+
+            return logits
+
+        with self.assertNotRaises(Exception):
+            tl.layers.LambdaLayer(self.net_in["lambda"], fn=keras_block, name='keras')
+
+    def test_slim_layer(self):
+
+        with self.assertNotRaises(Exception):
+            with slim.arg_scope(inception_v3_arg_scope()):
+                # Alternatively, you can implement inception_v3 without TensorLayer as follows.
+                # logits, end_points = inception_v3(X, num_classes=1001,
+                #                                    is_training=False)
+                tl.layers.SlimNetsLayer(
+                    self.net_in["slim"],
+                    slim_layer=inception_v3,
+                    slim_args={
+                        'num_classes': 1001,
+                        'is_training': False,
+                        # 'dropout_keep_prob' : 0.8, # for training
+                        # 'min_depth' : 16,
+                        # 'depth_multiplier' : 1.0,
+                        # 'prediction_fn' : slim.softmax,
+                        # 'spatial_squeeze' : True,
+                        # 'reuse' : None,
+                        # 'scope' : 'InceptionV3'
+                    },
+                    name='InceptionV3'  # <-- the name should be the same as the ckpt model
+                )
+
+
+if __name__ == '__main__':
+
+    # tf.logging.set_verbosity(tf.logging.INFO)
+    tf.logging.set_verbosity(tf.logging.DEBUG)
+
+    unittest.main()
diff --git a/tests/test_layers_merge.py b/tests/test_layers_merge.py
index c85688191..42c19607a 100644
--- a/tests/test_layers_merge.py
+++ b/tests/test_layers_merge.py
@@ -1,79 +1,109 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+import unittest
+
 import tensorflow as tf
 import tensorlayer as tl
-sess = tf.InteractiveSession()
-
-## vector
-x = tf.placeholder(tf.float32, shape=[None, 784])
-inputs = tl.layers.InputLayer(x, name='input_layer')
-net1 = tl.layers.DenseLayer(inputs, n_units=100, act=tf.nn.relu, name='relu1_1')
-net2 = tl.layers.DenseLayer(inputs, n_units=100, act=tf.nn.relu, name='relu2_1')
-net = tl.layers.ConcatLayer([net1, net2], concat_dim=1, name='concat_layer')
+
+class Layer_Merge_Test(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+
+        cls.data = dict()
+
+        ##############
+        #   vector   #
+        ##############
+
+        x = tf.placeholder(tf.float32, shape=[None, 784])
+        inputs = tl.layers.InputLayer(x, name='input_layer')
+
+        net_v1_1 = tl.layers.DenseLayer(inputs, n_units=100, act=tf.nn.relu, name='relu1_1')
+        net_v1_2 = tl.layers.DenseLayer(inputs, n_units=100, act=tf.nn.relu, name='relu2_1')
+ net_v1 = tl.layers.ConcatLayer([net_v1_1, net_v1_2], concat_dim=1, name='concat_layer') + + net_v1.print_params(False) + net_v1.print_layers() + + cls.data["net_vector1"] = dict() + cls.data["net_vector1"]["layers"] = net_v1.all_layers + cls.data["net_vector1"]["params"] = net_v1.all_params + cls.data["net_vector1"]["n_params"] = net_v1.count_params() -net.print_params(False) -net.print_layers() + net_v2_1 = tl.layers.DenseLayer(inputs, n_units=100, act=tf.nn.relu, name='net_0') + net_v2_2 = tl.layers.DenseLayer(inputs, n_units=100, act=tf.nn.relu, name='net_1') + net_v2 = tl.layers.ElementwiseLayer([net_v2_1, net_v2_2], combine_fn=tf.minimum, name='minimum') -if len(net.all_layers) != 3: - raise Exception("layers do not match") + net_v2.print_params(False) + net_v2.print_layers() -if len(net.all_params) != 4: - raise Exception("params do not match") + cls.data["net_vector2"] = dict() + cls.data["net_vector2"]["layers"] = net_v2.all_layers + cls.data["net_vector2"]["params"] = net_v2.all_params + cls.data["net_vector2"]["n_params"] = net_v2.count_params() -if net.count_params() != 157000: - raise Exception("params do not match") + ############# + # Image # + ############# -net_0 = tl.layers.DenseLayer(inputs, n_units=100, act=tf.nn.relu, name='net_0') -net_1 = tl.layers.DenseLayer(inputs, n_units=100, act=tf.nn.relu, name='net_1') -net = tl.layers.ElementwiseLayer([net_0, net_1], combine_fn=tf.minimum, name='minimum') + x = tf.placeholder(tf.float32, shape=[None, 100, 100, 3]) + inputs = tl.layers.InputLayer(x, name='input') -net.print_params(False) -net.print_layers() + net_im1_1 = tl.layers.Conv2d(inputs, n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, name='c1') + net_im1_2 = tl.layers.Conv2d(inputs, n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, name='c2') + net_im1 = tl.layers.ConcatLayer([net_im1_1, net_im1_2], concat_dim=-1, name='concat') -if len(net.all_layers) != 3: - raise Exception("layers do not match") + net_im1.print_params(False) + net_im1.print_layers() -if len(net.all_params) != 4: - raise Exception("params do not match") + cls.data["net_image1"] = dict() + cls.data["net_image1"]["shape"] = net_im1.outputs.get_shape().as_list() + cls.data["net_image1"]["layers"] = net_im1.all_layers + cls.data["net_image1"]["params"] = net_im1.all_params + cls.data["net_image1"]["n_params"] = net_im1.count_params() -if net.count_params() != 157000: - raise Exception("params do not match") + net_im2 = tl.layers.ElementwiseLayer([net_im1_1, net_im1_2], combine_fn=tf.minimum, name='minimum2') -## image -x = tf.placeholder(tf.float32, shape=[None, 100, 100, 3]) -inputs = tl.layers.InputLayer(x, name='input') -net1 = tl.layers.Conv2d(inputs, n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, name='c1') -net2 = tl.layers.Conv2d(inputs, n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, name='c2') -net = tl.layers.ConcatLayer([net1, net2], concat_dim=-1, name='concat') + net_im2.print_params(False) + net_im2.print_layers() -net.print_params(False) -net.print_layers() + cls.data["net_image2"] = dict() + cls.data["net_image2"]["shape"] = net_im2.outputs.get_shape().as_list() + cls.data["net_image2"]["layers"] = net_im2.all_layers + cls.data["net_image2"]["params"] = net_im2.all_params + cls.data["net_image2"]["n_params"] = net_im2.count_params() -shape = net.outputs.get_shape().as_list() -if shape[1:] != [50, 50, 64]: - raise Exception("shape do not match") + @classmethod + def tearDownClass(cls): + tf.reset_default_graph() -if 
len(net.all_layers) != 3: - raise Exception("layers do not match") + def test_net_vector1(self): + self.assertEqual(len(self.data["net_vector1"]["layers"]), 3) + self.assertEqual(len(self.data["net_vector1"]["params"]), 4) + self.assertEqual(self.data["net_vector1"]["n_params"], 157000) -if len(net.all_params) != 4: - raise Exception("params do not match") + def test_net_vector2(self): + self.assertEqual(len(self.data["net_vector2"]["layers"]), 3) + self.assertEqual(len(self.data["net_vector2"]["params"]), 4) + self.assertEqual(self.data["net_vector2"]["n_params"], 157000) -if net.count_params() != 1792: - raise Exception("params do not match") + def test_net_image1(self): + self.assertEqual(self.data["net_image1"]["shape"][1:], [50, 50, 64]) + self.assertEqual(len(self.data["net_image1"]["layers"]), 3) + self.assertEqual(len(self.data["net_image1"]["params"]), 4) + self.assertEqual(self.data["net_image1"]["n_params"], 1792) -net = tl.layers.ElementwiseLayer([net1, net2], combine_fn=tf.minimum, name='minimum2') -net.print_params(False) -net.print_layers() + def test_net_image2(self): + self.assertEqual(self.data["net_image2"]["shape"][1:], [50, 50, 32]) + self.assertEqual(len(self.data["net_image2"]["layers"]), 3) + self.assertEqual(len(self.data["net_image2"]["params"]), 4) + self.assertEqual(self.data["net_image2"]["n_params"], 1792) -shape = net.outputs.get_shape().as_list() -if shape[1:] != [50, 50, 32]: - raise Exception("shape do not match") -if len(net.all_layers) != 3: - raise Exception("layers do not match") +if __name__ == '__main__': -if len(net.all_params) != 4: - raise Exception("params do not match") + # tf.logging.set_verbosity(tf.logging.INFO) + tf.logging.set_verbosity(tf.logging.DEBUG) -if net.count_params() != 1792: - raise Exception("params do not match") + unittest.main() diff --git a/tests/test_layers_normalization.py b/tests/test_layers_normalization.py index 38480a1f5..4900f211c 100644 --- a/tests/test_layers_normalization.py +++ b/tests/test_layers_normalization.py @@ -1,3 +1,7 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import unittest + import tensorflow as tf import tensorlayer as tl @@ -6,26 +10,55 @@ def model(x, is_train=True, reuse=False): with tf.variable_scope("model", reuse=reuse): n = tl.layers.InputLayer(x, name='in') n = tl.layers.Conv2d(n, n_filter=80, name='conv2d_1') - n = tl.layers.BatchNormLayer(n, name='norm_batch') + n = tl.layers.BatchNormLayer(n, is_train=is_train, name='norm_batch') n = tl.layers.Conv2d(n, n_filter=80, name='conv2d_2') n = tl.layers.LocalResponseNormLayer(n, name='norm_local') - n = tl.layers.LayerNormLayer(n, name='norm_layer') + n = tl.layers.LayerNormLayer(n, reuse=reuse, name='norm_layer') n = tl.layers.InstanceNormLayer(n, name='norm_instance') return n -x = tf.placeholder(tf.float32, [None, 100, 100, 3]) -net = model(x, True, False) -_ = model(x, False, True) +class Layer_Normalization_Test(unittest.TestCase): + @classmethod + def setUpClass(cls): + + x = tf.placeholder(tf.float32, [None, 100, 100, 3]) + + net_train = model(x, is_train=True, reuse=False) + net_eval = model(x, is_train=False, reuse=True) + + net_train.print_layers() + net_train.print_params(False) + + cls.data = dict() + cls.data["train_network"] = dict() + cls.data["eval_network"] = dict() + + cls.data["train_network"]["layers"] = net_train.all_layers + cls.data["eval_network"]["layers"] = net_eval.all_layers + + cls.data["train_network"]["params"] = net_train.all_params + + cls.data["train_network"]["n_params"] = net_train.count_params() + + 
@classmethod + def tearDownClass(cls): + tf.reset_default_graph() + + def test_all_layers(self): + self.assertEqual(len(self.data["train_network"]["layers"]), 6) + self.assertEqual(len(self.data["eval_network"]["layers"]), 6) + + def test_all_params(self): + self.assertEqual(len(self.data["train_network"]["params"]), 12) + + def test_n_params(self): + self.assertEqual(self.data["train_network"]["n_params"], 60560) -net.print_layers() -net.print_params(False) -if len(net.all_layers) != 6: - raise Exception("layers do not match") +if __name__ == '__main__': -if len(net.all_params) != 12: - raise Exception("params do not match") + # tf.logging.set_verbosity(tf.logging.INFO) + tf.logging.set_verbosity(tf.logging.DEBUG) -if net.count_params() != 60560: - raise Exception("params do not match") + unittest.main() diff --git a/tests/test_layers_padding.py b/tests/test_layers_padding.py index 8d3d7f065..984e388b7 100644 --- a/tests/test_layers_padding.py +++ b/tests/test_layers_padding.py @@ -1,59 +1,92 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import unittest + import tensorflow as tf import tensorlayer as tl -## 1D -x = tf.placeholder(tf.float32, (None, 100, 1)) -n = tl.layers.InputLayer(x) -n1 = tl.layers.ZeroPad1d(n, padding=1) -n1.print_layers() -shape = n1.outputs.get_shape().as_list() -if shape[1:] != [102, 1]: - raise Exception("shape do not match") - -n2 = tl.layers.ZeroPad1d(n, padding=(2, 3)) -n2.print_layers() -shape = n2.outputs.get_shape().as_list() -if shape[1:] != [105, 1]: - raise Exception("shape do not match") - -## 2D -x = tf.placeholder(tf.float32, (None, 100, 100, 3)) -n = tl.layers.InputLayer(x) -n1 = tl.layers.ZeroPad2d(n, padding=2) -n1.print_layers() -shape = n1.outputs.get_shape().as_list() -if shape[1:] != [104, 104, 3]: - raise Exception("shape do not match") - -n2 = tl.layers.ZeroPad2d(n, padding=(2, 3)) -n2.print_layers() -shape = n2.outputs.get_shape().as_list() -if shape[1:] != [104, 106, 3]: - raise Exception("shape do not match") - -n3 = tl.layers.ZeroPad2d(n, padding=((3, 3), (4, 4))) -n3.print_layers() -shape = n3.outputs.get_shape().as_list() -if shape[1:] != [106, 108, 3]: - raise Exception("shape do not match") - -## 3D -x = tf.placeholder(tf.float32, (None, 100, 100, 100, 3)) -n = tl.layers.InputLayer(x) -n1 = tl.layers.ZeroPad3d(n, padding=2) -n1.print_layers() -shape = n1.outputs.get_shape().as_list() -if shape[1:] != [104, 104, 104, 3]: - raise Exception("shape do not match") - -n2 = tl.layers.ZeroPad3d(n, padding=(2, 3, 4)) -n2.print_layers() -shape = n2.outputs.get_shape().as_list() -if shape[1:] != [104, 106, 108, 3]: - raise Exception("shape do not match") - -n3 = tl.layers.ZeroPad3d(n, padding=((3, 3), (4, 4), (5, 5))) -n3.print_layers() -shape = n3.outputs.get_shape().as_list() -if shape[1:] != [106, 108, 110, 3]: - raise Exception("shape do not match") + +class Layer_Padding_Test(unittest.TestCase): + @classmethod + def setUpClass(cls): + + ## 1D + x = tf.placeholder(tf.float32, (None, 100, 1)) + n = tl.layers.InputLayer(x) + + n1 = tl.layers.ZeroPad1d(n, padding=1) + n2 = tl.layers.ZeroPad1d(n, padding=(2, 3)) + + n1.print_layers() + n2.print_layers() + + cls.n1_shape = n1.outputs.get_shape().as_list() + cls.n2_shape = n2.outputs.get_shape().as_list() + + ## 2D + x = tf.placeholder(tf.float32, (None, 100, 100, 3)) + n = tl.layers.InputLayer(x) + + n3 = tl.layers.ZeroPad2d(n, padding=2) + n4 = tl.layers.ZeroPad2d(n, padding=(2, 3)) + n5 = tl.layers.ZeroPad2d(n, padding=((3, 3), (4, 4))) + + n3.print_layers() + n4.print_layers() + 
n5.print_layers() + + cls.n3_shape = n3.outputs.get_shape().as_list() + cls.n4_shape = n4.outputs.get_shape().as_list() + cls.n5_shape = n5.outputs.get_shape().as_list() + + ## 3D + x = tf.placeholder(tf.float32, (None, 100, 100, 100, 3)) + n = tl.layers.InputLayer(x) + + n6 = tl.layers.ZeroPad3d(n, padding=2) + n7 = tl.layers.ZeroPad3d(n, padding=(2, 3, 4)) + n8 = tl.layers.ZeroPad3d(n, padding=((3, 3), (4, 4), (5, 5))) + + n6.print_layers() + n7.print_layers() + n8.print_layers() + + cls.n6_shape = n6.outputs.get_shape().as_list() + cls.n7_shape = n7.outputs.get_shape().as_list() + cls.n8_shape = n8.outputs.get_shape().as_list() + + @classmethod + def tearDownClass(cls): + tf.reset_default_graph() + + def test_n1_shape(self): + self.assertEqual(self.n1_shape[1:], [102, 1]) + + def test_n2_shape(self): + self.assertEqual(self.n2_shape[1:], [105, 1]) + + def test_n3_shape(self): + self.assertEqual(self.n3_shape[1:], [104, 104, 3]) + + def test_n4_shape(self): + self.assertEqual(self.n4_shape[1:], [104, 106, 3]) + + def test_n5_shape(self): + self.assertEqual(self.n5_shape[1:], [106, 108, 3]) + + def test_n6_shape(self): + self.assertEqual(self.n6_shape[1:], [104, 104, 104, 3]) + + def test_n7_shape(self): + self.assertEqual(self.n7_shape[1:], [104, 106, 108, 3]) + + def test_n8_shape(self): + self.assertEqual(self.n8_shape[1:], [106, 108, 110, 3]) + + +if __name__ == '__main__': + + # tf.logging.set_verbosity(tf.logging.INFO) + tf.logging.set_verbosity(tf.logging.DEBUG) + + unittest.main() diff --git a/tests/test_layers_pooling.py b/tests/test_layers_pooling.py index b94cee8f5..2f987e7cc 100644 --- a/tests/test_layers_pooling.py +++ b/tests/test_layers_pooling.py @@ -1,92 +1,109 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import unittest + import tensorflow as tf import tensorlayer as tl -## 1D ======================================================================== -x = tf.placeholder(tf.float32, (None, 100, 1)) -nin = tl.layers.InputLayer(x, name='in1') -nin = tl.layers.Conv1d(nin, n_filter=32, filter_size=5, stride=2, name='conv1d') -print(nin) -shape = nin.outputs.get_shape().as_list() -if (shape[1] != 50) or (shape[2] != 32): - raise Exception("shape do not match") - -n = tl.layers.MaxPool1d(nin, filter_size=3, strides=2, padding='same', name='maxpool1d') -print(n) -shape = n.outputs.get_shape().as_list() -# print(shape[1:3]) -if shape[1:3] != [25, 32]: - raise Exception("shape do not match") - -n = tl.layers.MeanPool1d(nin, filter_size=3, strides=2, padding='same', name='meanpool1d') -print(n) -shape = n.outputs.get_shape().as_list() -if shape[1:3] != [25, 32]: - raise Exception("shape do not match") - -n = tl.layers.GlobalMaxPool1d(nin, name='maxpool1d') -print(n) -shape = n.outputs.get_shape().as_list() -if shape[-1] != 32: - raise Exception("shape do not match") - -n = tl.layers.GlobalMeanPool1d(nin, name='meanpool1d') -print(n) -shape = n.outputs.get_shape().as_list() -if shape[-1] != 32: - raise Exception("shape do not match") - -## 2D ======================================================================== -x = tf.placeholder(tf.float32, (None, 100, 100, 3)) -nin = tl.layers.InputLayer(x, name='in2') -nin = tl.layers.Conv2d(nin, n_filter=32, filter_size=(3, 3), strides=(2, 2), name='conv2d') -print(nin) -shape = nin.outputs.get_shape().as_list() -if (shape[1] != 50) or (shape[2] != 50) or (shape[3] != 32): - raise Exception("shape do not match") - -n = tl.layers.MaxPool2d(nin, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='maxpool2d') -print(n) -shape = 
n.outputs.get_shape().as_list() -# print(shape[1:3]) -if shape[1:4] != [25, 25, 32]: - raise Exception("shape do not match") - -n = tl.layers.MeanPool2d(nin, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='meanpool2d') -print(n) -shape = n.outputs.get_shape().as_list() -if shape[1:4] != [25, 25, 32]: - raise Exception("shape do not match") - -n = tl.layers.GlobalMaxPool2d(nin, name='maxpool2d') -print(n) -shape = n.outputs.get_shape().as_list() -if shape[-1] != 32: - raise Exception("shape do not match") - -n = tl.layers.GlobalMeanPool2d(nin, name='meanpool2d') -print(n) -shape = n.outputs.get_shape().as_list() -if shape[-1] != 32: - raise Exception("shape do not match") - -## 3D ======================================================================== -x = tf.placeholder(tf.float32, (None, 100, 100, 100, 3)) -nin = tl.layers.InputLayer(x, name='in') - -n = tl.layers.MeanPool3d(nin, filter_size=(3, 3, 3), strides=(2, 2, 2), padding='SAME', name='meanpool3d') -print(n) -shape = n.outputs.get_shape().as_list() -if shape != [None, 50, 50, 50, 3]: - raise Exception("shape do not match") - -n = tl.layers.GlobalMaxPool3d(nin) -print(n) -shape = n.outputs.get_shape().as_list() -if shape != [None, 3]: - raise Exception("shape do not match") - -n = tl.layers.GlobalMeanPool3d(nin) -print(n) -shape = n.outputs.get_shape().as_list() -if shape != [None, 3]: - raise Exception("shape do not match") + +class Layer_Pooling_Test(unittest.TestCase): + @classmethod + def setUpClass(cls): + + ## 1D ======================================================================== + + x_1 = tf.placeholder(tf.float32, (None, 100, 1)) + nin_1 = tl.layers.InputLayer(x_1, name='in1') + + n1 = tl.layers.Conv1d(nin_1, n_filter=32, filter_size=5, stride=2, name='conv1d') + n2 = tl.layers.MaxPool1d(n1, filter_size=3, strides=2, padding='same', name='maxpool1d') + n3 = tl.layers.MeanPool1d(n1, filter_size=3, strides=2, padding='same', name='meanpool1d') + n4 = tl.layers.GlobalMaxPool1d(n1, name='maxpool1d') + n5 = tl.layers.GlobalMeanPool1d(n1, name='meanpool1d') + + cls.n1_shape = n1.outputs.get_shape().as_list() + cls.n2_shape = n2.outputs.get_shape().as_list() + cls.n3_shape = n3.outputs.get_shape().as_list() + cls.n4_shape = n4.outputs.get_shape().as_list() + cls.n5_shape = n5.outputs.get_shape().as_list() + + ## 2D ======================================================================== + + x_2 = tf.placeholder(tf.float32, (None, 100, 100, 3)) + nin_2 = tl.layers.InputLayer(x_2, name='in2') + + n6 = tl.layers.Conv2d(nin_2, n_filter=32, filter_size=(3, 3), strides=(2, 2), name='conv2d') + n7 = tl.layers.MaxPool2d(n6, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='maxpool2d') + n8 = tl.layers.MeanPool2d(n6, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='meanpool2d') + n9 = tl.layers.GlobalMaxPool2d(n6, name='maxpool2d') + n10 = tl.layers.GlobalMeanPool2d(n6, name='meanpool2d') + + cls.n6_shape = n6.outputs.get_shape().as_list() + cls.n7_shape = n7.outputs.get_shape().as_list() + cls.n8_shape = n8.outputs.get_shape().as_list() + cls.n9_shape = n9.outputs.get_shape().as_list() + cls.n10_shape = n10.outputs.get_shape().as_list() + + ## 3D ======================================================================== + + x_3 = tf.placeholder(tf.float32, (None, 100, 100, 100, 3)) + nin_3 = tl.layers.InputLayer(x_3, name='in') + + n11 = tl.layers.MeanPool3d(nin_3, filter_size=(3, 3, 3), strides=(2, 2, 2), padding='SAME', name='meanpool3d') + n12 = tl.layers.GlobalMaxPool3d(nin_3) + n13 = 
tl.layers.GlobalMeanPool3d(nin_3) + + cls.n11_shape = n11.outputs.get_shape().as_list() + cls.n12_shape = n12.outputs.get_shape().as_list() + cls.n13_shape = n13.outputs.get_shape().as_list() + + @classmethod + def tearDownClass(cls): + tf.reset_default_graph() + + def test_n1_shape(self): + self.assertEqual(self.n1_shape[1:3], [50, 32]) + + def test_n2_shape(self): + self.assertEqual(self.n2_shape[1:3], [25, 32]) + + def test_n3_shape(self): + self.assertEqual(self.n3_shape[1:3], [25, 32]) + + def test_n4_shape(self): + self.assertEqual(self.n4_shape[-1], 32) + + def test_n5_shape(self): + self.assertEqual(self.n5_shape[-1], 32) + + def test_n6_shape(self): + self.assertEqual(self.n6_shape[1:4], [50, 50, 32]) + + def test_n7_shape(self): + self.assertEqual(self.n7_shape[1:4], [25, 25, 32]) + + def test_n8_shape(self): + self.assertEqual(self.n8_shape[1:4], [25, 25, 32]) + + def test_n9_shape(self): + self.assertEqual(self.n9_shape[-1], 32) + + def test_n10_shape(self): + self.assertEqual(self.n10_shape[-1], 32) + + def test_n11_shape(self): + self.assertEqual(self.n11_shape, [None, 50, 50, 50, 3]) + + def test_n12_shape(self): + self.assertEqual(self.n12_shape, [None, 3]) + + def test_n13_shape(self): + self.assertEqual(self.n13_shape, [None, 3]) + + +if __name__ == '__main__': + + # tf.logging.set_verbosity(tf.logging.INFO) + tf.logging.set_verbosity(tf.logging.DEBUG) + + unittest.main() diff --git a/tests/test_layers_recurrent.py b/tests/test_layers_recurrent.py index c416d19a6..feb19dfd5 100644 --- a/tests/test_layers_recurrent.py +++ b/tests/test_layers_recurrent.py @@ -1,346 +1,387 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import unittest + import tensorflow as tf import tensorlayer as tl -## RNN encoder ==================================================== -batch_size = 32 -num_steps = 5 -vocab_size = 30 -hidden_size = 20 -keep_prob = 0.8 -is_train = True -input_data = tf.placeholder(tf.int32, [batch_size, num_steps]) -net = tl.layers.EmbeddingInputlayer(inputs=input_data, vocabulary_size=vocab_size, embedding_size=hidden_size, name='embed') -net = tl.layers.DropoutLayer(net, keep=keep_prob, is_fix=True, is_train=is_train, name='drop1') -net = tl.layers.RNNLayer(net, cell_fn=tf.contrib.rnn.BasicLSTMCell, n_hidden=hidden_size, n_steps=num_steps, return_last=False, name='lstm1') -lstm1 = net -net = tl.layers.DropoutLayer(net, keep=keep_prob, is_fix=True, is_train=is_train, name='drop2') -net = tl.layers.RNNLayer(net, cell_fn=tf.contrib.rnn.BasicLSTMCell, n_hidden=hidden_size, n_steps=num_steps, return_last=True, name='lstm2') -lstm2 = net -net = tl.layers.DropoutLayer(net, keep=keep_prob, is_fix=True, is_train=is_train, name='drop3') -net = tl.layers.DenseLayer(net, n_units=vocab_size, name='output') - -net.print_layers() -net.print_params(False) - -if len(net.all_layers) != 7: - raise Exception("layers do not match") - -if len(net.all_params) != 7: - raise Exception("params do not match") - -if net.count_params() != 7790: - raise Exception("params do not match") - -## CNN+RNN encoder ==================================================== -image_size = 100 -batch_size = 10 -num_steps = 5 - -x = tf.placeholder(tf.float32, shape=[batch_size, image_size, image_size, 1]) -net = tl.layers.InputLayer(x, name='in') -net = tl.layers.Conv2d(net, n_filter=32, filter_size=(5, 5), strides=(2, 2), act=tf.nn.relu, name='cnn1') -net = tl.layers.MaxPool2d(net, filter_size=(2, 2), strides=(2, 2), name='pool1') -net = tl.layers.Conv2d(net, n_filter=10, filter_size=(5, 5), strides=(2, 2), 
act=tf.nn.relu, name='cnn2') -net = tl.layers.MaxPool2d(net, filter_size=(2, 2), strides=(2, 2), name='pool2') -net = tl.layers.FlattenLayer(net, name='flatten') -net = tl.layers.ReshapeLayer(net, shape=(-1, num_steps, int(net.outputs._shape[-1]))) -rnn = tl.layers.RNNLayer(net, cell_fn=tf.contrib.rnn.BasicLSTMCell, n_hidden=200, n_steps=num_steps, return_last=False, return_seq_2d=True, name='rnn') -net = tl.layers.DenseLayer(rnn, n_units=3, name='out') - -net.print_layers() -net.print_params(False) - -if len(net.all_layers) != 8: - raise Exception("layers do not match") - -if len(net.all_params) != 8: - raise Exception("params do not match") - -if net.count_params() != 562245: - raise Exception("params do not match") - -## Bidirectional Synced input and output -batch_size = 10 -num_steps = 5 -vocab_size = 30 -hidden_size = 20 -input_data = tf.placeholder(tf.int32, [batch_size, num_steps]) -net = tl.layers.EmbeddingInputlayer(inputs=input_data, vocabulary_size=vocab_size, embedding_size=hidden_size, name='emb') -net = tl.layers.BiRNNLayer( - net, cell_fn=tf.contrib.rnn.BasicLSTMCell, n_hidden=hidden_size, n_steps=num_steps, return_last=False, return_seq_2d=False, name='birnn') - -net.print_layers() -net.print_params(False) - -shape = net.outputs.get_shape().as_list() -if shape[1:3] != [num_steps, hidden_size * 2]: - raise Exception("shape do not match") - -if len(net.all_layers) != 2: - raise Exception("layers do not match") - -if len(net.all_params) != 5: - raise Exception("params do not match") - -if net.count_params() != 7160: - raise Exception("params do not match") - -# n_layer=2 -net = tl.layers.EmbeddingInputlayer(inputs=input_data, vocabulary_size=vocab_size, embedding_size=hidden_size, name='emb2') -net = tl.layers.BiRNNLayer( - net, cell_fn=tf.contrib.rnn.BasicLSTMCell, n_hidden=hidden_size, n_steps=num_steps, n_layer=2, return_last=False, return_seq_2d=False, name='birnn2') - -net.print_layers() -net.print_params(False) - -shape = net.outputs.get_shape().as_list() -if shape[1:3] != [num_steps, hidden_size * 2]: - raise Exception("shape do not match") - -if len(net.all_layers) != 2: - raise Exception("layers do not match") - -if len(net.all_params) != 9: - raise Exception("params do not match") - -if net.count_params() != 13720: - raise Exception("params do not match") - -## ConvLSTMLayer TODO -# image_size = 100 -# batch_size = 10 -# num_steps = 5 -# x = tf.placeholder(tf.float32, shape=[batch_size, num_steps, image_size, image_size, 3]) -# net = tl.layers.InputLayer(x, name='in2') -# net = tl.layers.ConvLSTMLayer(net, -# feature_map=1, -# filter_size=(3, 3), -# cell_fn=tl.layers.BasicConvLSTMCell, -# initializer=tf.random_uniform_initializer(-0.1, 0.1), -# n_steps=num_steps, -# initial_state=None, -# return_last=False, -# return_seq_2d=False, -# name='convlstm') - -## Dynamic Synced input and output -batch_size = 32 -num_steps = 5 -vocab_size = 30 -embedding_size = 20 -keep_prob = 0.8 -is_train = True -input_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="input") -nin = tl.layers.EmbeddingInputlayer(inputs=input_seqs, vocabulary_size=vocab_size, embedding_size=embedding_size, name='seq_embedding') -rnn = tl.layers.DynamicRNNLayer( - nin, - cell_fn=tf.contrib.rnn.BasicLSTMCell, - n_hidden=embedding_size, - dropout=(keep_prob if is_train else None), - sequence_length=tl.layers.retrieve_seq_length_op2(input_seqs), - return_last=False, - return_seq_2d=True, - name='dynamicrnn') -net = tl.layers.DenseLayer(rnn, n_units=vocab_size, name="o") - 
-net.print_layers() -net.print_params(False) - -shape = rnn.outputs.get_shape().as_list() -if shape[-1] != embedding_size: - raise Exception("shape do not match") - -shape = net.outputs.get_shape().as_list() -if shape[-1] != vocab_size: - raise Exception("shape do not match") - -if len(net.all_layers) != 3: - raise Exception("layers do not match") - -if len(net.all_params) != 5: - raise Exception("params do not match") - -if net.count_params() != 4510: - raise Exception("params do not match") - -# n_layer=3 -nin = tl.layers.EmbeddingInputlayer(inputs=input_seqs, vocabulary_size=vocab_size, embedding_size=embedding_size, name='seq_embedding2') -rnn = tl.layers.DynamicRNNLayer( - nin, - cell_fn=tf.contrib.rnn.BasicLSTMCell, - n_hidden=embedding_size, - dropout=(keep_prob if is_train else None), - sequence_length=tl.layers.retrieve_seq_length_op2(input_seqs), - n_layer=3, - return_last=False, - return_seq_2d=True, - name='dynamicrnn2') -net = tl.layers.DenseLayer(rnn, n_units=vocab_size, name="o2") - -shape = rnn.outputs.get_shape().as_list() -if (shape[-1] != embedding_size) or (len(shape) != 2): - raise Exception("shape do not match") - -net = tl.layers.DynamicRNNLayer( - nin, - cell_fn=tf.contrib.rnn.BasicLSTMCell, - n_hidden=embedding_size, - dropout=None, - sequence_length=tl.layers.retrieve_seq_length_op2(input_seqs), - n_layer=3, - return_last=False, - return_seq_2d=False, - name='dynamicrnn3') -# net = tl.layers.DenseLayer(rnn, n_units=vocab_size, name="o3") - -net.print_layers() -net.print_params(False) - -shape = net.outputs.get_shape().as_list() -if (shape[-1] != embedding_size) or (len(shape) != 3): - raise Exception("shape do not match") - -net = tl.layers.DynamicRNNLayer( - nin, - cell_fn=tf.contrib.rnn.BasicLSTMCell, - n_hidden=embedding_size, - dropout=None, - sequence_length=tl.layers.retrieve_seq_length_op2(input_seqs), - n_layer=1, - return_last=True, - return_seq_2d=False, - name='dynamicrnn4') -net.print_layers() -net.print_params(False) -shape = net.outputs.get_shape().as_list() -if (shape[-1] != embedding_size) or (len(shape) != 2): - raise Exception("shape do not match") - -net = tl.layers.DynamicRNNLayer( - nin, - cell_fn=tf.contrib.rnn.BasicLSTMCell, - n_hidden=embedding_size, - dropout=None, - sequence_length=tl.layers.retrieve_seq_length_op2(input_seqs), - n_layer=1, - return_last=True, - return_seq_2d=True, - name='dynamicrnn5') -net.print_layers() -net.print_params(False) -shape = net.outputs.get_shape().as_list() -if (shape[-1] != embedding_size) or (len(shape) != 2): - raise Exception("shape do not match") - -## BiDynamic Synced input and output -rnn = tl.layers.BiDynamicRNNLayer( - nin, - cell_fn=tf.contrib.rnn.BasicLSTMCell, - n_hidden=embedding_size, - dropout=(keep_prob if is_train else None), - sequence_length=tl.layers.retrieve_seq_length_op2(input_seqs), - return_last=False, - return_seq_2d=True, - name='bidynamicrnn') -net = tl.layers.DenseLayer(rnn, n_units=vocab_size, name="o4") - -net.print_layers() -net.print_params(False) - -shape = rnn.outputs.get_shape().as_list() -if shape[-1] != embedding_size * 2: - raise Exception("shape do not match") - -shape = net.outputs.get_shape().as_list() -if shape[-1] != vocab_size: - raise Exception("shape do not match") - -if len(net.all_layers) != 3: - raise Exception("layers do not match") - -if len(net.all_params) != 7: - raise Exception("params do not match") - -if net.count_params() != 8390: - raise Exception("params do not match") - -# n_layer=2 -rnn = tl.layers.BiDynamicRNNLayer( - nin, - 
cell_fn=tf.contrib.rnn.BasicLSTMCell, - n_hidden=embedding_size, - dropout=(keep_prob if is_train else None), - sequence_length=tl.layers.retrieve_seq_length_op2(input_seqs), - n_layer=2, - return_last=False, - return_seq_2d=True, - name='bidynamicrnn2') -net = tl.layers.DenseLayer(rnn, n_units=vocab_size, name="o5") - -net.print_layers() -net.print_params(False) - -shape = rnn.outputs.get_shape().as_list() -if shape[-1] != embedding_size * 2: - raise Exception("shape do not match") - -shape = net.outputs.get_shape().as_list() -if shape[-1] != vocab_size: - raise Exception("shape do not match") - -if len(net.all_layers) != 3: - raise Exception("layers do not match") - -if len(net.all_params) != 11: - raise Exception("params do not match") - -if net.count_params() != 18150: - raise Exception("params do not match") - -## Seq2Seq -from tensorlayer.layers import EmbeddingInputlayer, Seq2Seq, retrieve_seq_length_op2, DenseLayer -batch_size = 32 -encode_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="encode_seqs") -decode_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="decode_seqs") -target_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="target_seqs") -target_mask = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="target_mask") # tl.prepro.sequences_get_mask() -with tf.variable_scope("model"): - # for chatbot, you can use the same embedding layer, - # for translation, you may want to use 2 seperated embedding layers - with tf.variable_scope("embedding") as vs: - net_encode = EmbeddingInputlayer(inputs=encode_seqs, vocabulary_size=10000, embedding_size=200, name='seq_embed') - vs.reuse_variables() - # tl.layers.set_name_reuse(True) - net_decode = EmbeddingInputlayer(inputs=decode_seqs, vocabulary_size=10000, embedding_size=200, name='seq_embed') - net = Seq2Seq( - net_encode, - net_decode, - cell_fn=tf.contrib.rnn.BasicLSTMCell, - n_hidden=200, - initializer=tf.random_uniform_initializer(-0.1, 0.1), - encode_sequence_length=retrieve_seq_length_op2(encode_seqs), - decode_sequence_length=retrieve_seq_length_op2(decode_seqs), - initial_state_encode=None, - dropout=None, - n_layer=2, - return_seq_2d=True, - name='Seq2seq') -net = DenseLayer(net, n_units=10000, act=tf.identity, name='oo') -e_loss = tl.cost.cross_entropy_seq_with_mask(logits=net.outputs, target_seqs=target_seqs, input_mask=target_mask, return_details=False, name='cost') -y = tf.nn.softmax(net.outputs) - -net.print_layers() -net.print_params(False) - -shape = net.outputs.get_shape().as_list() -if shape[-1] != 10000: - raise Exception("shape do not match") - -if len(net.all_layers) != 5: - raise Exception("layers do not match") - -if len(net.all_params) != 11: - raise Exception("params do not match") - -if net.count_params() != 5293200: - raise Exception("params do not match") + +class Layer_Recurrent_Test(unittest.TestCase): + @classmethod + def setUpClass(cls): + + cls.net1_batch_size = 32 + cls.net2_batch_size = 10 + cls.net3_batch_size = 10 + cls.net5_batch_size = 32 + cls.net11_batch_size = 32 + + cls.vocab_size = 30 + cls.hidden_size = 20 + cls.image_size = 100 + cls.embedding_size = 20 + + cls.num_steps = 5 + + cls.keep_prob = 0.8 + cls.is_train = True + + # =============================== RNN encoder =============================== + + input_data = tf.placeholder(tf.int32, [cls.net1_batch_size, cls.num_steps]) + + net1 = tl.layers.EmbeddingInputlayer(inputs=input_data, vocabulary_size=cls.vocab_size, embedding_size=cls.hidden_size, name='embed') + 
net1 = tl.layers.DropoutLayer(net1, keep=cls.keep_prob, is_fix=True, is_train=cls.is_train, name='drop1') + net1 = tl.layers.RNNLayer(net1, cell_fn=tf.contrib.rnn.BasicLSTMCell, n_hidden=cls.hidden_size, n_steps=cls.num_steps, return_last=False, name='lstm1') + + # lstm1 = net1 + + net1 = tl.layers.DropoutLayer(net1, keep=cls.keep_prob, is_fix=True, is_train=cls.is_train, name='drop2') + net1 = tl.layers.RNNLayer(net1, cell_fn=tf.contrib.rnn.BasicLSTMCell, n_hidden=cls.hidden_size, n_steps=cls.num_steps, return_last=True, name='lstm2') + + # lstm2 = net1 + + net1 = tl.layers.DropoutLayer(net1, keep=cls.keep_prob, is_fix=True, is_train=cls.is_train, name='drop3') + net1 = tl.layers.DenseLayer(net1, n_units=cls.vocab_size, name='output') + + net1.print_layers() + net1.print_params(False) + + cls.net1_shape = net1.outputs.get_shape().as_list() + cls.net1_layers = net1.all_layers + cls.net1_params = net1.all_params + cls.net1_n_params = net1.count_params() + + # =============================== CNN+RNN encoder =============================== + + x2 = tf.placeholder(tf.float32, shape=[cls.net2_batch_size, cls.image_size, cls.image_size, 1]) + net2 = tl.layers.InputLayer(x2, name='in') + + net2 = tl.layers.Conv2d(net2, n_filter=32, filter_size=(5, 5), strides=(2, 2), act=tf.nn.relu, name='cnn1') + net2 = tl.layers.MaxPool2d(net2, filter_size=(2, 2), strides=(2, 2), name='pool1') + net2 = tl.layers.Conv2d(net2, n_filter=10, filter_size=(5, 5), strides=(2, 2), act=tf.nn.relu, name='cnn2') + net2 = tl.layers.MaxPool2d(net2, filter_size=(2, 2), strides=(2, 2), name='pool2') + + net2 = tl.layers.FlattenLayer(net2, name='flatten') + net2 = tl.layers.ReshapeLayer(net2, shape=(-1, cls.num_steps, int(net2.outputs._shape[-1]))) + + rnn = tl.layers.RNNLayer( + net2, cell_fn=tf.contrib.rnn.BasicLSTMCell, n_hidden=200, n_steps=cls.num_steps, return_last=False, return_seq_2d=True, name='rnn') + + net2 = tl.layers.DenseLayer(rnn, n_units=3, name='out') + + net2.print_layers() + net2.print_params(False) + + cls.net2_shape = net2.outputs.get_shape().as_list() + cls.net2_layers = net2.all_layers + cls.net2_params = net2.all_params + cls.net2_n_params = net2.count_params() + + # =============================== Bidirectional Synced input and output =============================== + + x3 = tf.placeholder(tf.int32, [cls.net3_batch_size, cls.num_steps]) + + net3 = tl.layers.EmbeddingInputlayer(inputs=x3, vocabulary_size=cls.vocab_size, embedding_size=cls.hidden_size, name='emb') + net3 = tl.layers.BiRNNLayer( + net3, cell_fn=tf.contrib.rnn.BasicLSTMCell, n_hidden=cls.hidden_size, n_steps=cls.num_steps, return_last=False, return_seq_2d=False, name='birnn') + + net3.print_layers() + net3.print_params(False) + + cls.net3_shape = net3.outputs.get_shape().as_list() + cls.net3_layers = net3.all_layers + cls.net3_params = net3.all_params + cls.net3_n_params = net3.count_params() + + # n_layer=2 + net4 = tl.layers.EmbeddingInputlayer(inputs=x3, vocabulary_size=cls.vocab_size, embedding_size=cls.hidden_size, name='emb2') + net4 = tl.layers.BiRNNLayer( + net4, + cell_fn=tf.contrib.rnn.BasicLSTMCell, + n_hidden=cls.hidden_size, + n_steps=cls.num_steps, + n_layer=2, + return_last=False, + return_seq_2d=False, + name='birnn2') + + net4.print_layers() + net4.print_params(False) + + cls.net4_shape = net4.outputs.get_shape().as_list() + cls.net4_layers = net4.all_layers + cls.net4_params = net4.all_params + cls.net4_n_params = net4.count_params() + + ## TODO: ConvLSTMLayer + # image_size = 100 + # batch_size = 10 + # num_steps = 5 + # 
x = tf.placeholder(tf.float32, shape=[batch_size, num_steps, image_size, image_size, 3]) + # net = tl.layers.InputLayer(x, name='in2') + # net = tl.layers.ConvLSTMLayer(net, + # feature_map=1, + # filter_size=(3, 3), + # cell_fn=tl.layers.BasicConvLSTMCell, + # initializer=tf.random_uniform_initializer(-0.1, 0.1), + # n_steps=num_steps, + # initial_state=None, + # return_last=False, + # return_seq_2d=False, + # name='convlstm') + + # =============================== Dynamic Synced input and output =============================== + + input_seqs = tf.placeholder(dtype=tf.int64, shape=[cls.net5_batch_size, None], name="input") + nin = tl.layers.EmbeddingInputlayer(inputs=input_seqs, vocabulary_size=cls.vocab_size, embedding_size=cls.embedding_size, name='seq_embedding') + + rnn = tl.layers.DynamicRNNLayer( + nin, + cell_fn=tf.contrib.rnn.BasicLSTMCell, + n_hidden=cls.embedding_size, + dropout=(cls.keep_prob if cls.is_train else None), + sequence_length=tl.layers.retrieve_seq_length_op2(input_seqs), + return_last=False, + return_seq_2d=True, + name='dynamicrnn') + + net5 = tl.layers.DenseLayer(rnn, n_units=cls.vocab_size, name="o") + + net5.print_layers() + net5.print_params(False) + + cls.net5_shape = net5.outputs.get_shape().as_list() + cls.net5_rnn_shape = rnn.outputs.get_shape().as_list() + cls.net5_layers = net5.all_layers + cls.net5_params = net5.all_params + cls.net5_n_params = net5.count_params() + + # n_layer=3 + nin = tl.layers.EmbeddingInputlayer(inputs=input_seqs, vocabulary_size=cls.vocab_size, embedding_size=cls.embedding_size, name='seq_embedding2') + rnn = tl.layers.DynamicRNNLayer( + nin, + cell_fn=tf.contrib.rnn.BasicLSTMCell, + n_hidden=cls.embedding_size, + dropout=(cls.keep_prob if cls.is_train else None), + sequence_length=tl.layers.retrieve_seq_length_op2(input_seqs), + n_layer=3, + return_last=False, + return_seq_2d=True, + name='dynamicrnn2') + + # net6 = tl.layers.DenseLayer(rnn, n_units=cls.vocab_size, name="o2") + + net6 = tl.layers.DynamicRNNLayer( + nin, + cell_fn=tf.contrib.rnn.BasicLSTMCell, + n_hidden=cls.embedding_size, + dropout=None, + sequence_length=tl.layers.retrieve_seq_length_op2(input_seqs), + n_layer=3, + return_last=False, + return_seq_2d=False, + name='dynamicrnn3') + + # net6 = tl.layers.DenseLayer(rnn, n_units=vocab_size, name="o3") + + net6.print_layers() + net6.print_params(False) + + cls.net6_shape = net6.outputs.get_shape().as_list() + cls.net6_rnn_shape = rnn.outputs.get_shape().as_list() + + net7 = tl.layers.DynamicRNNLayer( + nin, + cell_fn=tf.contrib.rnn.BasicLSTMCell, + n_hidden=cls.embedding_size, + dropout=None, + sequence_length=tl.layers.retrieve_seq_length_op2(input_seqs), + n_layer=1, + return_last=True, + return_seq_2d=False, + name='dynamicrnn4') + + net7.print_layers() + net7.print_params(False) + + cls.net7_shape = net7.outputs.get_shape().as_list() + + net8 = tl.layers.DynamicRNNLayer( + nin, + cell_fn=tf.contrib.rnn.BasicLSTMCell, + n_hidden=cls.embedding_size, + dropout=None, + sequence_length=tl.layers.retrieve_seq_length_op2(input_seqs), + n_layer=1, + return_last=True, + return_seq_2d=True, + name='dynamicrnn5') + + net8.print_layers() + net8.print_params(False) + + cls.net8_shape = net8.outputs.get_shape().as_list() + + # =============================== BiDynamic Synced input and output =============================== + + rnn = tl.layers.BiDynamicRNNLayer( + nin, + cell_fn=tf.contrib.rnn.BasicLSTMCell, + n_hidden=cls.embedding_size, + dropout=(cls.keep_prob if cls.is_train else None), + 
sequence_length=tl.layers.retrieve_seq_length_op2(input_seqs),
+ return_last=False,
+ return_seq_2d=True,
+ name='bidynamicrnn')
+
+ net9 = tl.layers.DenseLayer(rnn, n_units=cls.vocab_size, name="o4")
+
+ net9.print_layers()
+ net9.print_params(False)
+
+ cls.net9_shape = net9.outputs.get_shape().as_list()
+ cls.net9_rnn_shape = rnn.outputs.get_shape().as_list()
+ cls.net9_layers = net9.all_layers
+ cls.net9_params = net9.all_params
+ cls.net9_n_params = net9.count_params()
+
+ # n_layer=2
+ rnn = tl.layers.BiDynamicRNNLayer(
+ nin,
+ cell_fn=tf.contrib.rnn.BasicLSTMCell,
+ n_hidden=cls.embedding_size,
+ dropout=(cls.keep_prob if cls.is_train else None),
+ sequence_length=tl.layers.retrieve_seq_length_op2(input_seqs),
+ n_layer=2,
+ return_last=False,
+ return_seq_2d=True,
+ name='bidynamicrnn2')
+
+ net10 = tl.layers.DenseLayer(rnn, n_units=cls.vocab_size, name="o5")
+
+ net10.print_layers()
+ net10.print_params(False)
+
+ cls.net10_shape = net10.outputs.get_shape().as_list()
+ cls.net10_rnn_shape = rnn.outputs.get_shape().as_list()
+ cls.net10_layers = net10.all_layers
+ cls.net10_params = net10.all_params
+ cls.net10_n_params = net10.count_params()
+
+ # =============================== Seq2Seq ===============================
+
+ encode_seqs = tf.placeholder(dtype=tf.int64, shape=[cls.net11_batch_size, None], name="encode_seqs")
+ decode_seqs = tf.placeholder(dtype=tf.int64, shape=[cls.net11_batch_size, None], name="decode_seqs")
+ # target_seqs = tf.placeholder(dtype=tf.int64, shape=[cls.net11_batch_size, None], name="target_seqs")
+ # target_mask = tf.placeholder(dtype=tf.int64, shape=[cls.net11_batch_size, None], name="target_mask") # tl.prepro.sequences_get_mask()
+
+ with tf.variable_scope("model"):
+ # for chatbot, you can use the same embedding layer,
+ # for translation, you may want to use 2 separate embedding layers
+
+ with tf.variable_scope("embedding") as vs:
+ net_encode = tl.layers.EmbeddingInputlayer(inputs=encode_seqs, vocabulary_size=10000, embedding_size=200, name='seq_embed')
+ vs.reuse_variables()
+ # tl.layers.set_name_reuse(True)
+ net_decode = tl.layers.EmbeddingInputlayer(inputs=decode_seqs, vocabulary_size=10000, embedding_size=200, name='seq_embed')
+
+ net11 = tl.layers.Seq2Seq(
+ net_encode,
+ net_decode,
+ cell_fn=tf.contrib.rnn.BasicLSTMCell,
+ n_hidden=200,
+ initializer=tf.random_uniform_initializer(-0.1, 0.1),
+ encode_sequence_length=tl.layers.retrieve_seq_length_op2(encode_seqs),
+ decode_sequence_length=tl.layers.retrieve_seq_length_op2(decode_seqs),
+ initial_state_encode=None,
+ dropout=None,
+ n_layer=2,
+ return_seq_2d=True,
+ name='Seq2seq')
+
+ net11 = tl.layers.DenseLayer(net11, n_units=10000, act=tf.identity, name='oo')
+
+ # e_loss = tl.cost.cross_entropy_seq_with_mask(logits=net11.outputs, target_seqs=target_seqs, input_mask=target_mask, return_details=False, name='cost')
+ # y = tf.nn.softmax(net11.outputs)
+
+ net11.print_layers()
+ net11.print_params(False)
+
+ cls.net11_shape = net11.outputs.get_shape().as_list()
+ cls.net11_layers = net11.all_layers
+ cls.net11_params = net11.all_params
+ cls.net11_n_params = net11.count_params()
+
+ @classmethod
+ def tearDownClass(cls):
+ tf.reset_default_graph()
+
+ def test_net1(self):
+ self.assertEqual(self.net1_shape, [self.net1_batch_size, self.vocab_size])
+ self.assertEqual(len(self.net1_layers), 7)
+ self.assertEqual(len(self.net1_params), 7)
+ self.assertEqual(self.net1_n_params, 7790)
+
+ def test_net2(self):
+ self.assertEqual(self.net2_shape, [self.net2_batch_size, 3])
+ 
self.assertEqual(len(self.net2_layers), 8) + self.assertEqual(len(self.net2_params), 8) + self.assertEqual(self.net2_n_params, 562245) + + def test_net3(self): + self.assertEqual(self.net3_shape[1:3], [self.num_steps, self.hidden_size * 2]) + self.assertEqual(len(self.net3_layers), 2) + self.assertEqual(len(self.net3_params), 5) + self.assertEqual(self.net3_n_params, 7160) + + def test_net4(self): + self.assertEqual(self.net4_shape[1:3], [self.num_steps, self.hidden_size * 2]) + self.assertEqual(len(self.net4_layers), 2) + self.assertEqual(len(self.net4_params), 9) + self.assertEqual(self.net4_n_params, 13720) + + def test_net5(self): + self.assertEqual(self.net5_shape[-1], self.vocab_size) + self.assertEqual(self.net5_rnn_shape[-1], self.embedding_size) + self.assertEqual(len(self.net5_layers), 3) + self.assertEqual(len(self.net5_params), 5) + self.assertEqual(self.net5_n_params, 4510) + + def test_net6(self): + self.assertEqual(self.net6_shape[-1], self.embedding_size) + self.assertEqual(self.net6_rnn_shape[-1], self.embedding_size) + + def test_net7(self): + self.assertEqual(self.net7_shape[-1], self.embedding_size) + + def test_net8(self): + self.assertEqual(self.net8_shape[-1], self.embedding_size) + + def test_net9(self): + self.assertEqual(self.net9_shape[-1], self.vocab_size) + self.assertEqual(self.net9_rnn_shape[-1], self.embedding_size * 2) + self.assertEqual(len(self.net9_layers), 3) + self.assertEqual(len(self.net9_params), 7) + self.assertEqual(self.net9_n_params, 8390) + + def test_net10(self): + self.assertEqual(self.net10_shape[-1], self.vocab_size) + self.assertEqual(self.net10_rnn_shape[-1], self.embedding_size * 2) + self.assertEqual(len(self.net10_layers), 3) + self.assertEqual(len(self.net10_params), 11) + self.assertEqual(self.net10_n_params, 18150) + + def test_net11(self): + self.assertEqual(self.net11_shape[-1], 10000) + self.assertEqual(len(self.net11_layers), 5) + self.assertEqual(len(self.net11_params), 11) + self.assertEqual(self.net11_n_params, 5293200) + + +if __name__ == '__main__': + + # tf.logging.set_verbosity(tf.logging.INFO) + tf.logging.set_verbosity(tf.logging.DEBUG) + + unittest.main() diff --git a/tests/test_layers_shape.py b/tests/test_layers_shape.py index 09efe5b80..22eb87952 100644 --- a/tests/test_layers_shape.py +++ b/tests/test_layers_shape.py @@ -1,62 +1,95 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import unittest + import tensorflow as tf import tensorlayer as tl -x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1]) -net = tl.layers.InputLayer(x, name='input') -## Flatten -net = tl.layers.FlattenLayer(net, name='flatten') +class Layer_Shape_Test(unittest.TestCase): + @classmethod + def setUpClass(cls): + + x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1]) + net = tl.layers.InputLayer(x, name='input') + + ## Flatten + net1 = tl.layers.FlattenLayer(net, name='flatten') + + net1.print_layers() + net1.print_params(False) + + cls.net1_shape = net1.outputs.get_shape().as_list() + cls.net1_layers = net1.all_layers + cls.net1_params = net1.all_params + cls.net1_n_params = net1.count_params() + + ## Reshape + net2 = tl.layers.ReshapeLayer(net1, shape=(-1, 28, 28, 1), name='reshape') + + net2.print_layers() + net2.print_params(False) + + cls.net2_shape = net2.outputs.get_shape().as_list() + cls.net2_layers = net2.all_layers + cls.net2_params = net2.all_params + cls.net2_n_params = net2.count_params() + + ## TransposeLayer + net3 = tl.layers.TransposeLayer(net2, perm=[0, 1, 3, 2], name='trans') + + net3.print_layers() + 
net3.print_params(False) + + cls.net3_shape = net3.outputs.get_shape().as_list() + cls.net3_layers = net3.all_layers + cls.net3_params = net3.all_params + cls.net3_n_params = net3.count_params() -net.print_layers() -net.print_params(False) + @classmethod + def tearDownClass(cls): + tf.reset_default_graph() -shape = net.outputs.get_shape().as_list() -if shape[-1] != 784: - raise Exception("shape do not match") + def test_net1_shape(self): + self.assertEqual(self.net1_shape[-1], 784) -if len(net.all_layers) != 1: - raise Exception("layers do not match") + def test_net1_layers(self): + self.assertEqual(len(self.net1_layers), 1) -if len(net.all_params) != 0: - raise Exception("params do not match") + def test_net1_params(self): + self.assertEqual(len(self.net1_params), 0) -if net.count_params() != 0: - raise Exception("params do not match") + def test_net1_n_params(self): + self.assertEqual(self.net1_n_params, 0) -## Reshape -net = tl.layers.ReshapeLayer(net, shape=(-1, 28, 28, 1), name='reshape') + def test_net2_shape(self): + self.assertEqual(self.net2_shape[1:], [28, 28, 1]) -net.print_layers() -net.print_params(False) + def test_net2_layers(self): + self.assertEqual(len(self.net2_layers), 2) -shape = net.outputs.get_shape().as_list() -if shape[1:] != [28, 28, 1]: - raise Exception("shape do not match") + def test_net2_params(self): + self.assertEqual(len(self.net2_params), 0) -if len(net.all_layers) != 2: - raise Exception("layers do not match") + def test_net2_n_params(self): + self.assertEqual(self.net2_n_params, 0) -if len(net.all_params) != 0: - raise Exception("params do not match") + def test_net3_shape(self): + self.assertEqual(self.net3_shape[1:], [28, 1, 28]) -if net.count_params() != 0: - raise Exception("params do not match") + def test_net3_layers(self): + self.assertEqual(len(self.net3_layers), 3) -## TransposeLayer -net = tl.layers.TransposeLayer(net, perm=[0, 1, 3, 2], name='trans') + def test_net3_params(self): + self.assertEqual(len(self.net3_params), 0) -net.print_layers() -net.print_params(False) + def test_net3_n_params(self): + self.assertEqual(self.net3_n_params, 0) -shape = net.outputs.get_shape().as_list() -if shape[1:] != [28, 1, 28]: - raise Exception("shape do not match") -if len(net.all_layers) != 3: - raise Exception("layers do not match") +if __name__ == '__main__': -if len(net.all_params) != 0: - raise Exception("params do not match") + # tf.logging.set_verbosity(tf.logging.INFO) + tf.logging.set_verbosity(tf.logging.DEBUG) -if net.count_params() != 0: - raise Exception("params do not match") + unittest.main() diff --git a/tests/test_layers_spatial_transformer.py b/tests/test_layers_spatial_transformer.py index 6935528a5..0976d0f49 100644 --- a/tests/test_layers_spatial_transformer.py +++ b/tests/test_layers_spatial_transformer.py @@ -1,8 +1,15 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import unittest + +try: + from tests.unittests_helper import CustomTestCase +except ImportError: + from unittests_helper import CustomTestCase + import tensorflow as tf import tensorlayer as tl -x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1]) - def model(x, is_train, reuse): with tf.variable_scope("STN", reuse=reuse): @@ -27,21 +34,46 @@ def model(x, is_train, reuse): return n, s -net, s = model(x, is_train=True, reuse=False) -_, _ = model(x, is_train=False, reuse=True) +class Layer_Spatial_Transformer_Test(CustomTestCase): + @classmethod + def setUpClass(cls): + cls.x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1]) + + net, s = model(cls.x, 
is_train=True, reuse=False) + + net.print_layers() + net.print_params(False) + + cls.s_shape = s.outputs.get_shape().as_list() + cls.net_layers = net.all_layers + cls.net_params = net.all_params + cls.net_n_params = net.count_params() + + @classmethod + def tearDownClass(cls): + tf.reset_default_graph() + + def test_reuse(self): + + with self.assertNotRaises(Exception): + _, _ = model(self.x, is_train=True, reuse=True) + + def test_net_shape(self): + self.assertEqual(self.s_shape[1:], [40, 40, 1]) + + def test_net_layers(self): + self.assertEqual(len(self.net_layers), 9) + + def test_net_params(self): + self.assertEqual(len(self.net_params), 12) -net.print_layers() -net.print_params(False) + def test_net_n_params(self): + self.assertEqual(self.net_n_params, 1667980) -shape = s.outputs.get_shape().as_list() -if shape[1:] != [40, 40, 1]: - raise Exception("shape do not match") -if len(net.all_layers) != 9: - raise Exception("layers do not match") +if __name__ == '__main__': -if len(net.all_params) != 12: - raise Exception("params do not match") + # tf.logging.set_verbosity(tf.logging.INFO) + tf.logging.set_verbosity(tf.logging.DEBUG) -if net.count_params() != 1667980: - raise Exception("params do not match") + unittest.main() diff --git a/tests/test_layers_special_activation.py b/tests/test_layers_special_activation.py index c50591eea..9a087c96c 100644 --- a/tests/test_layers_special_activation.py +++ b/tests/test_layers_special_activation.py @@ -1,41 +1,70 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import unittest + import tensorflow as tf import tensorlayer as tl -x = tf.placeholder(tf.float32, shape=[None, 30]) -net = tl.layers.InputLayer(x, name='input') -net = tl.layers.DenseLayer(net, n_units=10, name='dense') -net = tl.layers.PReluLayer(net, name='prelu') -net.print_layers() -net.print_params(False) +class Layer_Special_Activation_Test(unittest.TestCase): + @classmethod + def setUpClass(cls): + + x = tf.placeholder(tf.float32, shape=[None, 30]) + net = tl.layers.InputLayer(x, name='input') + net = tl.layers.DenseLayer(net, n_units=10, name='dense') + net1 = tl.layers.PReluLayer(net, name='prelu') + + net1.print_layers() + net1.print_params(False) + + cls.net1_shape = net1.outputs.get_shape().as_list() + cls.net1_layers = net1.all_layers + cls.net1_params = net1.all_params + cls.net1_n_params = net1.count_params() + + net2 = tl.layers.PReluLayer(net1, channel_shared=True, name='prelu2') + + net2.print_layers() + net2.print_params(False) + + cls.net2_shape = net2.outputs.get_shape().as_list() + cls.net2_layers = net2.all_layers + cls.net2_params = net2.all_params + cls.net2_n_params = net2.count_params() + + @classmethod + def tearDownClass(cls): + tf.reset_default_graph() + + def test_net1_shape(self): + self.assertEqual(self.net1_shape[-1], 10) + + def test_net1_all_layers(self): + self.assertEqual(len(self.net1_layers), 2) -shape = net.outputs.get_shape().as_list() -if shape[-1] != 10: - raise Exception("shape do not match") + def test_net1_all_params(self): + self.assertEqual(len(self.net1_params), 3) -if len(net.all_layers) != 2: - raise Exception("layers do not match") + def test_net1_n_params(self): + self.assertEqual(self.net1_n_params, 320) -if len(net.all_params) != 3: - raise Exception("params do not match") + def test_net2_shape(self): + self.assertEqual(self.net2_shape[-1], 10) -if net.count_params() != 320: - raise Exception("params do not match") + def test_net2_all_layers(self): + self.assertEqual(len(self.net2_layers), 3) -net = tl.layers.PReluLayer(net, 
channel_shared=True, name='prelu2') + def test_net2_all_params(self): + self.assertEqual(len(self.net2_params), 4) -net.print_layers() -net.print_params(False) + def test_net2_n_params(self): + self.assertEqual(self.net2_n_params, 321) -shape = net.outputs.get_shape().as_list() -if shape[-1] != 10: - raise Exception("shape do not match") -if len(net.all_layers) != 3: - raise Exception("layers do not match") +if __name__ == '__main__': -if len(net.all_params) != 4: - raise Exception("params do not match") + # tf.logging.set_verbosity(tf.logging.INFO) + tf.logging.set_verbosity(tf.logging.DEBUG) -if net.count_params() != 321: - raise Exception("params do not match") + unittest.main() diff --git a/tests/test_layers_stack.py b/tests/test_layers_stack.py index b473c00d2..fe10113c1 100644 --- a/tests/test_layers_stack.py +++ b/tests/test_layers_stack.py @@ -1,44 +1,60 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import unittest + import tensorflow as tf import tensorlayer as tl -x = tf.placeholder(tf.float32, shape=[None, 30]) -net = tl.layers.InputLayer(x, name='input') -net1 = tl.layers.DenseLayer(net, n_units=10, name='dense1') -net2 = tl.layers.DenseLayer(net, n_units=10, name='dense2') -net3 = tl.layers.DenseLayer(net, n_units=10, name='dense3') -net = tl.layers.StackLayer([net1, net2, net3], axis=1, name='stack') -net.print_layers() -net.print_params(False) +class Layer_Stack_Test(unittest.TestCase): + @classmethod + def setUpClass(cls): + + x = tf.placeholder(tf.float32, shape=[None, 30]) + net = tl.layers.InputLayer(x, name='input') + net1 = tl.layers.DenseLayer(net, n_units=10, name='dense1') + net2 = tl.layers.DenseLayer(net, n_units=10, name='dense2') + net3 = tl.layers.DenseLayer(net, n_units=10, name='dense3') + net = tl.layers.StackLayer([net1, net2, net3], axis=1, name='stack') + + net.print_layers() + net.print_params(False) + + cls.net_shape = net.outputs.get_shape().as_list() + cls.layers = net.all_layers + cls.params = net.all_params + cls.n_params = net.count_params() + + cls.net = tl.layers.UnStackLayer(net, axis=1, name='unstack') + + @classmethod + def tearDownClass(cls): + tf.reset_default_graph() + + def test_net_shape(self): + self.assertEqual(self.net_shape[-1], 10) -shape = net.outputs.get_shape().as_list() -if shape[-1] != 10: - raise Exception("shape do not match") + def test_layers(self): + self.assertEqual(len(self.layers), 4) -if len(net.all_layers) != 4: - raise Exception("layers do not match") + def test_params(self): + self.assertEqual(len(self.params), 6) + self.assertEqual(self.n_params, 930) -if len(net.all_params) != 6: - raise Exception("params do not match") + def test_unstack(self): -if net.count_params() != 930: - raise Exception("params do not match") + for n in self.net: + shape = n.outputs.get_shape().as_list() -net = tl.layers.UnStackLayer(net, axis=1, name='unstack') -for n in net: - print(n, n.outputs) - shape = n.outputs.get_shape().as_list() - if shape[-1] != 10: - raise Exception("shape do not match") + self.assertEqual(shape[-1], 10) + self.assertEqual(len(n.all_layers), 4) + self.assertEqual(len(n.all_params), 6) + self.assertEqual(n.count_params(), 930) - # n.print_layers() - # n.print_params(False) - if len(n.all_layers) != 4: - raise Exception("layers do not match") +if __name__ == '__main__': - if len(n.all_params) != 6: - raise Exception("params do not match") + # tf.logging.set_verbosity(tf.logging.INFO) + tf.logging.set_verbosity(tf.logging.DEBUG) - if n.count_params() != 930: - raise Exception("params do not match") + 
unittest.main() diff --git a/tests/test_layers_super_resolution.py b/tests/test_layers_super_resolution.py index 4850d66ff..b2427195d 100644 --- a/tests/test_layers_super_resolution.py +++ b/tests/test_layers_super_resolution.py @@ -1,48 +1,73 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import unittest + import tensorflow as tf import tensorlayer as tl -## 1D -t_signal = tf.placeholder('float32', [10, 100, 4], name='x') -n = tl.layers.InputLayer(t_signal, name='in') -n = tl.layers.Conv1d(n, n_filter=32, filter_size=3, stride=1, padding='SAME', name='conv1d') -n = tl.layers.SubpixelConv1d(n, scale=2, name='subpixel') -print(n.outputs.shape) -# ... (10, 200, 2) -n.print_layers() -n.print_params(False) -shape = n.outputs.get_shape().as_list() -if shape != [10, 200, 16]: - raise Exception("shape do not match") +class Layer_Super_Resolution_Test(unittest.TestCase): + @classmethod + def setUpClass(cls): + t_signal = tf.placeholder('float32', [10, 100, 4], name='x') + n = tl.layers.InputLayer(t_signal, name='in') + n = tl.layers.Conv1d(n, n_filter=32, filter_size=3, stride=1, padding='SAME', name='conv1d') + net1 = tl.layers.SubpixelConv1d(n, scale=2, name='subpixel') + + net1.print_layers() + net1.print_params(False) + + cls.net1_shape = net1.outputs.get_shape().as_list() + cls.net1_layers = net1.all_layers + cls.net1_params = net1.all_params + cls.net1_n_params = net1.count_params() + + ## 2D + x = tf.placeholder('float32', [10, 100, 100, 3], name='x') + n = tl.layers.InputLayer(x, name='in') + n = tl.layers.Conv2d(n, n_filter=32, filter_size=(3, 2), strides=(1, 1), padding='SAME', name='conv2d') + net2 = tl.layers.SubpixelConv2d(n, scale=2, name='subpixel2d') + + net2.print_layers() + net2.print_params(False) + + cls.net2_shape = net2.outputs.get_shape().as_list() + cls.net2_layers = net2.all_layers + cls.net2_params = net2.all_params + cls.net2_n_params = net2.count_params() + + @classmethod + def tearDownClass(cls): + tf.reset_default_graph() + + def test_net1_shape(self): + self.assertEqual(self.net1_shape, [10, 200, 16]) + + def test_net1_layers(self): + self.assertEqual(len(self.net1_layers), 2) + + def test_net1_params(self): + self.assertEqual(len(self.net1_params), 2) -if len(n.all_layers) != 2: - raise Exception("layers do not match") + def test_net1_n_params(self): + self.assertEqual(self.net1_n_params, 416) -if len(n.all_params) != 2: - raise Exception("params do not match") + def test_net2_shape(self): + self.assertEqual(self.net2_shape, [10, 200, 200, 8]) -if n.count_params() != 416: - raise Exception("params do not match") + def test_net2_layers(self): + self.assertEqual(len(self.net2_layers), 2) -## 2D -x = tf.placeholder('float32', [10, 100, 100, 3], name='x') -n = tl.layers.InputLayer(x, name='in') -n = tl.layers.Conv2d(n, n_filter=32, filter_size=(3, 2), strides=(1, 1), padding='SAME', name='conv2d') -n = tl.layers.SubpixelConv2d(n, scale=2, name='subpixel2d') -print(n.outputs.shape) + def test_net2_params(self): + self.assertEqual(len(self.net2_params), 2) -n.print_layers() -n.print_params(False) + def test_net2_n_params(self): + self.assertEqual(self.net2_n_params, 608) -shape = n.outputs.get_shape().as_list() -if shape != [10, 200, 200, 8]: - raise Exception("shape do not match") -if len(n.all_layers) != 2: - raise Exception("layers do not match") +if __name__ == '__main__': -if len(n.all_params) != 2: - raise Exception("params do not match") + # tf.logging.set_verbosity(tf.logging.INFO) + tf.logging.set_verbosity(tf.logging.DEBUG) -if n.count_params() != 608: - 
raise Exception("params do not match") + unittest.main() diff --git a/tests/test_layers_time_distributed.py b/tests/test_layers_time_distributed.py index 7425c2f8e..bac25201a 100644 --- a/tests/test_layers_time_distributed.py +++ b/tests/test_layers_time_distributed.py @@ -1,31 +1,61 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import unittest + +try: + from tests.unittests_helper import CustomTestCase +except ImportError: + from unittests_helper import CustomTestCase + import tensorflow as tf -from tensorlayer.layers import InputLayer, TimeDistributedLayer, DenseLayer - -sess = tf.InteractiveSession() -batch_size = 32 -timestep = 20 -input_dim = 100 - -## no reuse -x = tf.placeholder(dtype=tf.float32, shape=[batch_size, timestep, input_dim], name="encode_seqs") -net = InputLayer(x, name='input') -net = TimeDistributedLayer(net, layer_class=DenseLayer, args={'n_units': 50, 'name': 'dense'}, name='time_dense') - -if net.outputs.get_shape().as_list() != [32, 20, 50]: - raise Exception("shape do not match") -# ... (32, 20, 50) -net.print_params(False) -if net.count_params() != 5050: - raise Exception("params do not match") - - -## reuse -def model(x, is_train=True, reuse=False): - with tf.variable_scope("model", reuse=reuse): - net = InputLayer(x, name='input') - net = TimeDistributedLayer(net, layer_class=DenseLayer, args={'n_units': 50, 'name': 'dense'}, name='time_dense') +import tensorlayer as tl + + +def model(x, is_train=True, reuse=False, name_scope="env1"): + with tf.variable_scope(name_scope, reuse=reuse): + net = tl.layers.InputLayer(x, name='input') + net = tl.layers.TimeDistributedLayer(net, layer_class=tl.layers.DenseLayer, args={'n_units': 50, 'name': 'dense'}, name='time_dense') return net -net_train = model(x, is_train=True, reuse=False) -net_test = model(x, is_train=False, reuse=True) +class Layer_Time_Distributed_Test(CustomTestCase): + @classmethod + def setUpClass(cls): + + batch_size = 32 + timestep = 20 + input_dim = 100 + + cls.x = tf.placeholder(dtype=tf.float32, shape=[batch_size, timestep, input_dim], name="encode_seqs") + net = model(cls.x, is_train=True, reuse=False) + + cls.net_shape = net.outputs.get_shape().as_list() + cls.n_params = net.count_params() + net.print_params(False) + + @classmethod + def tearDownClass(cls): + tf.reset_default_graph() + + def test_net_shape(self): + self.assertEqual(self.net_shape, [32, 20, 50]) + + def test_net_n_params(self): + self.assertEqual(self.n_params, 5050) + + def test_reuse(self): + + with self.assertNotRaises(Exception): + model(self.x, is_train=True, reuse=False, name_scope="env2") + model(self.x, is_train=False, reuse=True, name_scope="env2") + + with self.assertRaises(Exception): + model(self.x, is_train=True, reuse=False) # Already defined model with the same var_scope + + +if __name__ == '__main__': + + # tf.logging.set_verbosity(tf.logging.INFO) + tf.logging.set_verbosity(tf.logging.DEBUG) + + unittest.main() diff --git a/tests/test_mnist_simple.py b/tests/test_mnist_simple.py index 980053ef6..5829d0017 100644 --- a/tests/test_mnist_simple.py +++ b/tests/test_mnist_simple.py @@ -1,56 +1,101 @@ -#! 
/usr/bin/python +#!/usr/bin/env python # -*- coding: utf-8 -*- +import unittest + +try: + from tests.unittests_helper import CustomTestCase +except ImportError: + from unittests_helper import CustomTestCase import tensorflow as tf import tensorlayer as tl -sess = tf.InteractiveSession() - -# prepare data -X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) -# for fashion_MNIST dataset test -# X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_fashion_mnist_dataset(shape=(-1, 784)) -# define placeholder -x = tf.placeholder(tf.float32, shape=[None, 784], name='x') -y_ = tf.placeholder(tf.int64, shape=[None], name='y_') - -# define the network -network = tl.layers.InputLayer(x, name='input') -network = tl.layers.DropoutLayer(network, keep=0.8, name='drop1') -network = tl.layers.DenseLayer(network, n_units=100, act=tf.nn.relu, name='relu1') -network = tl.layers.DropoutLayer(network, keep=0.8, name='drop2') -network = tl.layers.DenseLayer(network, n_units=100, act=tf.nn.relu, name='relu2') -network = tl.layers.DropoutLayer(network, keep=0.8, name='drop3') -# the softmax is implemented internally in tl.cost.cross_entropy(y, y_) to -# speed up computation, so we use identity here. -# see tf.nn.sparse_softmax_cross_entropy_with_logits() -network = tl.layers.DenseLayer(network, n_units=10, act=tf.identity, name='output') - -# define cost function and metric. -y = network.outputs -cost = tl.cost.cross_entropy(y, y_, name='cost') -correct_prediction = tf.equal(tf.argmax(y, 1), y_) -acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) -y_op = tf.argmax(tf.nn.softmax(y), 1) - -# define the optimizer -train_params = network.all_params -train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost, var_list=train_params) - -# initialize all variables in the session -tl.layers.initialize_global_variables(sess) - -# print network information -network.print_params() -network.print_layers() - -# train the network -tl.utils.fit( - sess, network, train_op, cost, X_train, y_train, x, y_, acc=acc, batch_size=500, n_epoch=1, print_freq=1, X_val=X_val, y_val=y_val, eval_train=False) - -# evaluation -tl.utils.test(sess, network, acc, X_test, y_test, x, y_, batch_size=None, cost=cost) - -# save the network to .npz file -tl.files.save_npz(network.all_params, name='model.npz') -sess.close() + +class Simple_MNIST_Test(CustomTestCase): + @classmethod + def setUpClass(cls): + + # define placeholders + cls.x = tf.placeholder(tf.float32, shape=[None, 784], name='x') + cls.y_ = tf.placeholder(tf.int64, shape=[None], name='y_') + + # define the network + network = tl.layers.InputLayer(cls.x, name='input') + network = tl.layers.DropoutLayer(network, keep=0.8, name='drop1') + network = tl.layers.DenseLayer(network, n_units=100, act=tf.nn.relu, name='relu1') + network = tl.layers.DropoutLayer(network, keep=0.8, name='drop2') + network = tl.layers.DenseLayer(network, n_units=100, act=tf.nn.relu, name='relu2') + network = tl.layers.DropoutLayer(network, keep=0.8, name='drop3') + + # the softmax is implemented internally in tl.cost.cross_entropy(y, y_) to + # speed up computation, so we use identity here. + # see tf.nn.sparse_softmax_cross_entropy_with_logits() + cls.network = tl.layers.DenseLayer(network, n_units=10, act=tf.identity, name='output') + + # define cost function and metric. 
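+ # NOTE: tl.cost.cross_entropy consumes the raw logits of the identity-
+ # activated output layer above (softmax is applied internally), and
+ # `acc` is the batch mean of argmax(logits) == label.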
+ y = cls.network.outputs
+ cls.cost = tl.cost.cross_entropy(y, cls.y_, name='cost')
+
+ correct_prediction = tf.equal(tf.argmax(y, 1), cls.y_)
+
+ cls.acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
+ # y_op = tf.argmax(tf.nn.softmax(y), 1)
+
+ # define the optimizer
+ train_params = cls.network.all_params
+ cls.train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cls.cost, var_list=train_params)
+
+ @classmethod
+ def tearDownClass(cls):
+ tf.reset_default_graph()
+
+ def test_mnist_training(self):
+
+ # prepare data
+ X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))
+
+ # for fashion_MNIST dataset test
+ # X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_fashion_mnist_dataset(shape=(-1, 784))
+
+ with self.assertNotRaises(Exception):
+ with tf.Session() as sess:
+
+ # initialize all variables in the session
+ tl.layers.initialize_global_variables(sess)
+
+ # print network information
+ self.network.print_params()
+ self.network.print_layers()
+
+ # train the network
+ tl.utils.fit(
+ sess,
+ self.network,
+ self.train_op,
+ self.cost,
+ X_train,
+ y_train,
+ self.x,
+ self.y_,
+ acc=self.acc,
+ batch_size=500,
+ n_epoch=1,
+ print_freq=1,
+ X_val=X_val,
+ y_val=y_val,
+ eval_train=False)
+
+ # evaluation
+ tl.utils.test(sess, self.network, self.acc, X_test, y_test, self.x, self.y_, batch_size=None, cost=self.cost)
+
+ # save the network to .npz file
+ tl.files.save_npz(self.network.all_params, name='model.npz')
+ sess.close()
+
+
+if __name__ == '__main__':
+
+ # tf.logging.set_verbosity(tf.logging.INFO)
+ tf.logging.set_verbosity(tf.logging.DEBUG)
+
+ unittest.main()
diff --git a/tests/test_models.py b/tests/test_models.py
index 7800a918d..206e93807 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -1,62 +1,104 @@
+#!/usr/bin/env python
# -*- coding: utf-8 -*-
+import unittest
+
+try:
+ from tests.unittests_helper import CustomTestCase
+except ImportError:
+ from unittests_helper import CustomTestCase

import tensorflow as tf
import tensorlayer as tl

-with tf.Graph().as_default() as graph:
- # - Classify ImageNet classes with VGG16, see `tutorial_models_vgg16.py __`
- x = tf.placeholder(tf.float32, [None, 224, 224, 3])
- # get the whole model
- vgg = tl.models.VGG16(x)
- # restore pre-trained VGG parameters
- # sess = tf.InteractiveSession()
- # vgg.restore_params(sess)
- # use for inferencing
- probs = tf.nn.softmax(vgg.outputs)
- if len(vgg.all_layers) != 22:
- raise Exception("layers do not match")
-
- if len(vgg.all_params) != 32:
- raise Exception("params do not match")
-
-with tf.Graph().as_default() as graph:
- # - Extract features with VGG16 and Train a classifier with 100 classes
- x = tf.placeholder(tf.float32, [None, 224, 224, 3])
- # get VGG without the last layer
- vgg = tl.models.VGG16(x, end_with='fc2_relu')
-
- if len(vgg.all_layers) != 21:
- raise Exception("layers do not match")
-
- if len(vgg.all_params) != 30:
- raise Exception("params do not match")
-
- # add one more layer
- net = tl.layers.DenseLayer(vgg, n_units=100, name='out')
- # initialize all parameters
- # sess = tf.InteractiveSession()
- # tl.layers.initialize_global_variables(sess)
- # restore pre-trained VGG parameters
- # vgg.restore_params(sess)
- # train your own classifier (only update the last layer)
- train_params = tl.layers.get_variables_with_name('out')
- if len(train_params) != 2:
- raise Exception("params do not match")
-
-with tf.Graph().as_default() as graph:
- # - Reuse model
- x1 = 
tf.placeholder(tf.float32, [None, 224, 224, 3]) - x2 = tf.placeholder(tf.float32, [None, 224, 224, 3]) - # get VGG without the last layer - vgg1 = tl.models.VGG16(x1, end_with='fc2_relu') - # reuse the parameters of vgg1 with different input - vgg2 = tl.models.VGG16(x2, end_with='fc2_relu', reuse=True) - # restore pre-trained VGG parameters (as they share parameters, we don’t need to restore vgg2) - # sess = tf.InteractiveSession() - # vgg1.restore_params(sess) - - if len(vgg1.all_layers) != 21: - raise Exception("layers do not match") - - if len(vgg1.all_params) != 30: - raise Exception("params do not match") + +class VGG_Model_Test(CustomTestCase): + @classmethod + def setUpClass(cls): + + with tf.Graph().as_default(): + # - Classify ImageNet classes with VGG16, see `tutorial_models_vgg16.py __` + x = tf.placeholder(tf.float32, [None, 224, 224, 3]) + # get the whole model + vgg1 = tl.models.VGG16(x) + # restore pre-trained VGG parameters + # sess = tf.InteractiveSession() + # vgg.restore_params(sess) + # use for inferencing + # probs = tf.nn.softmax(vgg1.outputs) + + cls.vgg1_layers = vgg1.all_layers + cls.vgg1_params = vgg1.all_params + + with tf.Graph().as_default(): + # - Extract features with VGG16 and Train a classifier with 100 classes + x = tf.placeholder(tf.float32, [None, 224, 224, 3]) + # get VGG without the last layer + vgg2 = tl.models.VGG16(x, end_with='fc2_relu') + + cls.vgg2_layers = vgg2.all_layers + cls.vgg2_params = vgg2.all_params + + # add one more layer + _ = tl.layers.DenseLayer(vgg2, n_units=100, name='out') + # initialize all parameters + # sess = tf.InteractiveSession() + # tl.layers.initialize_global_variables(sess) + # restore pre-trained VGG parameters + # vgg.restore_params(sess) + # train your own classifier (only update the last layer) + + cls.vgg2_train_params = tl.layers.get_variables_with_name('out') + + with tf.Graph().as_default() as graph: + # - Reuse model + x = tf.placeholder(tf.float32, [None, 224, 224, 3]) + # get VGG without the last layer + vgg3 = tl.models.VGG16(x, end_with='fc2_relu') + # reuse the parameters of vgg1 with different input + # restore pre-trained VGG parameters (as they share parameters, we don’t need to restore vgg2) + # sess = tf.InteractiveSession() + # vgg1.restore_params(sess) + + cls.vgg3_layers = vgg3.all_layers + cls.vgg3_params = vgg3.all_params + cls.vgg3_graph = graph + + @classmethod + def tearDownClass(cls): + tf.reset_default_graph() + + def test_vgg1_layers(self): + self.assertEqual(len(self.vgg1_layers), 22) + + def test_vgg2_layers(self): + self.assertEqual(len(self.vgg2_layers), 21) + + def test_vgg3_layers(self): + self.assertEqual(len(self.vgg3_layers), 21) + + def test_vgg1_params(self): + self.assertEqual(len(self.vgg1_params), 32) + + def test_vgg2_params(self): + self.assertEqual(len(self.vgg2_params), 30) + + def test_vgg3_params(self): + self.assertEqual(len(self.vgg3_params), 30) + + def test_vgg2_train_params(self): + self.assertEqual(len(self.vgg2_train_params), 2) + + def test_reuse_vgg(self): + + with self.assertNotRaises(Exception): + with self.vgg3_graph.as_default(): + x = tf.placeholder(tf.float32, [None, 224, 224, 3]) + _ = tl.models.VGG16(x, end_with='fc2_relu', reuse=True) + + +if __name__ == '__main__': + + # tf.logging.set_verbosity(tf.logging.INFO) + tf.logging.set_verbosity(tf.logging.DEBUG) + + unittest.main() diff --git a/tests/test_pydocstyle.py b/tests/test_pydocstyle.py index 96a22fa8b..0d6f0059b 100644 --- a/tests/test_pydocstyle.py +++ b/tests/test_pydocstyle.py @@ -1,21 +1,17 @@ 
-from pydocstyle.checker import check -from pydocstyle.checker import violations +#!/usr/bin/env python +# -*- coding: utf-8 -*- -import testing +import unittest -registry = violations.ErrorRegistry - -_disabled_checks = [ - 'D202', # No blank lines allowed after function docstring - 'D205', # 1 blank line required between summary line and description -] +try: + import tests.testing as testing +except ImportError: + import testing +from pydocstyle.checker import check +from pydocstyle.checker import violations -def check_all_files(): - for filename in testing.list_all_py_files(): - for err in check([filename]): - if not err.code in _disabled_checks: - yield err +registry = violations.ErrorRegistry def lookup_error_params(code): @@ -25,16 +21,41 @@ def lookup_error_params(code): return error_params -violations = list(check_all_files()) +class PyDOC_Style_Test(unittest.TestCase): + @classmethod + def setUpClass(cls): + + cls.violations = list() + + _disabled_checks = [ + 'D202', # No blank lines allowed after function docstring + 'D205', # 1 blank line required between summary line and description + ] + + for filename in testing.list_all_py_files(): + + print(filename) + for err in check([filename]): + + if not err.code in _disabled_checks: + cls.violations.append(err) + + def test_violations(self): + if self.violations: + + counts = dict() + + for err in self.violations: + counts[err.code] = counts.get(err.code, 0) + 1 + + for n, code in sorted([(n, code) for code, n in counts.items()], reverse=True): + p = lookup_error_params(code) + print('%s %8d %s' % (code, n, p.short_desc)) + + print() + + #raise Exception('PyDoc Coding Style: %d violations have been found' % ( len(self.violations))) ## TODO: Correct these errors to allow Exception -if violations: - counts = dict() - for e in violations: - print(e) - counts[e.code] = counts.get(e.code, 0) + 1 - for n, code in sorted([(n, code) for code, n in counts.items()], reverse=True): - p = lookup_error_params(code) - print('%s %8d %s' % (code, n, p.short_desc)) - print('%s %8d violations' % ('tot', len(violations))) - # TODO: exit(1) +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_reuse_mlp.py b/tests/test_reuse_mlp.py index 965e4c0d4..bb346eed8 100644 --- a/tests/test_reuse_mlp.py +++ b/tests/test_reuse_mlp.py @@ -1,15 +1,10 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import unittest + import tensorflow as tf import tensorlayer as tl -sess = tf.InteractiveSession() - -# prepare data -X_train, y_train, X_val, y_val, X_test, y_test = \ - tl.files.load_mnist_dataset(shape=(-1,784)) -# define placeholder -x = tf.placeholder(tf.float32, shape=[None, 784], name='x') -y_ = tf.placeholder(tf.int64, shape=[None], name='y_') - # define the network def mlp(x, is_train=True, reuse=False): @@ -25,16 +20,30 @@ def mlp(x, is_train=True, reuse=False): return network -# define inferences -net_train = mlp(x, is_train=True, reuse=False) -net_test = mlp(x, is_train=False, reuse=True) +class MLP_Reuse_Test(unittest.TestCase): + @classmethod + def setUpClass(cls): + + # define placeholder + cls.x = tf.placeholder(tf.float32, shape=[None, 784], name='x') + + # define inferences + mlp(cls.x, is_train=True, reuse=False) + mlp(cls.x, is_train=False, reuse=True) + + @classmethod + def tearDownClass(cls): + tf.reset_default_graph() + + def test_reuse(self): + + with self.assertRaises(Exception): + mlp(self.x, is_train=False, reuse=False) # Already defined model with the same var_scope + + +if __name__ == '__main__': -try: - is_except = False - 
net_test = mlp(x, is_train=False, reuse=False)
-except Exception as e:
- is_except = True
- print(e)
+ # tf.logging.set_verbosity(tf.logging.INFO)
+ tf.logging.set_verbosity(tf.logging.DEBUG)
-if is_except == False:
- raise Exception("it should not be success")
+ unittest.main()
diff --git a/tests/test_yapf_format.py b/tests/test_yapf_format.py
index e65675cef..c2569ca51 100644
--- a/tests/test_yapf_format.py
+++ b/tests/test_yapf_format.py
@@ -1,30 +1,53 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
import sys
-import testing
+import unittest
+
+try:
+ import tests.testing as testing
+except ImportError:
+ import testing
+
from yapf.yapflib.yapf_api import FormatCode

def _read_utf_8_file(filename):
- if sys.version_info.major == 2:
- return unicode(open(filename, 'rb').read(), 'utf-8')
+ if sys.version_info.major == 2: ## Python 2 specific
+ with open(filename, 'rb') as f:
+ return unicode(f.read(), 'utf-8')
else:
- return open(filename, encoding='utf-8').read()
+ with open(filename, encoding='utf-8') as f:
+ return f.read()
+
+
+class YAPF_Style_Test(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+
+ cls.files_badly_formatted = list()
+
+ for filename in testing.list_all_py_files():
+
+ print(filename)
+ code = _read_utf_8_file(filename)
+
+ # https://pypi.python.org/pypi/yapf/0.20.2#example-as-a-module
+ diff, changed = FormatCode(code, filename=filename, style_config='.style.yapf', print_diff=True)
+
+ if changed:
+ print(diff)
+ cls.files_badly_formatted.append(filename)
+ def test_unformatted_files(self):
+ if self.files_badly_formatted:
+ print()
-def check_all_files():
- for filename in testing.list_all_py_files():
- print(filename)
- code = _read_utf_8_file(filename)
- # https://pypi.python.org/pypi/yapf/0.20.2#example-as-a-module
- diff, changed = FormatCode(code, filename=filename, style_config='.style.yapf', print_diff=True)
- if changed:
- print(diff)
- yield filename
+ for filename in self.files_badly_formatted:
+ print('yapf -i %s' % filename)
+ raise Exception("Bad Coding Style: %d files need to be formatted, run the following commands to fix" % len(self.files_badly_formatted))
-unformatted = list(check_all_files())
-if unformatted:
- print('%d files need to be formatted, run the following commands to fix' % len(unformatted))
- for filename in unformatted:
- print('yapf -i %s' % filename)
- exit(1)
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/unittests_helper.py b/tests/unittests_helper.py
new file mode 100644
index 000000000..b4137c9b0
--- /dev/null
+++ b/tests/unittests_helper.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+import unittest
+from contextlib import contextmanager
+
+
+class CustomTestCase(unittest.TestCase):
+ @contextmanager
+ def assertNotRaises(self, exc_type):
+ try:
+ yield None
+ except exc_type:
+ raise self.failureException('{} raised'.format(exc_type.__name__))
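+
+
+# Usage sketch (illustrative only; `MyTest` and `build_graph` are hypothetical
+# names, but the test classes above use the helper in exactly this way):
+#
+#     class MyTest(CustomTestCase):
+#         def test_build(self):
+#             with self.assertNotRaises(Exception):
+#                 build_graph()  # fails the test if any Exception escapes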