From 32e92e26b57f7ae8022445b76275cb237b10567c Mon Sep 17 00:00:00 2001 From: DEKHTIARJonathan Date: Tue, 10 Apr 2018 10:41:36 +0200 Subject: [PATCH 01/21] __init__ import error message repositioned at a more appropriate location --- tensorlayer/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorlayer/__init__.py b/tensorlayer/__init__.py index ee130177d..dd59e4cd8 100644 --- a/tensorlayer/__init__.py +++ b/tensorlayer/__init__.py @@ -2,9 +2,9 @@ from __future__ import absolute_import try: - install_instr = "Please make sure you install a recent enough version of TensorFlow." import tensorflow except ImportError: + install_instr = "Please make sure you install a recent enough version of TensorFlow." raise ImportError("__init__.py : Could not import TensorFlow." + install_instr) from . import activation From 90e41d5d7b5227f6f65a638c9575db4a81398274 Mon Sep 17 00:00:00 2001 From: DEKHTIARJonathan Date: Tue, 10 Apr 2018 14:47:28 +0200 Subject: [PATCH 02/21] gitignore updated with venv environment --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 6da63706f..2e36fa083 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,4 @@ dist docs/_build tensorlayer.egg-info tensorlayer/__pacache__ +venv/ \ No newline at end of file From d04a355a54313c330c7ae801c69e483907494407 Mon Sep 17 00:00:00 2001 From: DEKHTIARJonathan Date: Tue, 10 Apr 2018 14:48:52 +0200 Subject: [PATCH 03/21] Typo fixed in tensorlayer.layers.convolution.py --- tensorlayer/layers/convolution.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorlayer/layers/convolution.py b/tensorlayer/layers/convolution.py index a49a93ade..e3ff78f4c 100644 --- a/tensorlayer/layers/convolution.py +++ b/tensorlayer/layers/convolution.py @@ -600,7 +600,7 @@ def __init__( size_w = size[1] * int(self.inputs.get_shape()[2]) size = [int(size_h), int(size_w)] else: - raise Exception("Donot support shape %s" % self.inputs.get_shape()) + raise Exception("Do not support shape %s" % self.inputs.get_shape()) logging.info("DownSampling2dLayer %s: is_scale:%s size:%s method:%d, align_corners:%s" % (name, is_scale, size, method, align_corners)) with tf.variable_scope(name): try: From e010005b9b3a41a1d06b2a45b708f53466c81e48 Mon Sep 17 00:00:00 2001 From: DEKHTIARJonathan Date: Tue, 10 Apr 2018 14:50:26 +0200 Subject: [PATCH 04/21] Deprecation warning added for tl.layer.deconv2d with backward compatibility restored - Issue #479 --- tensorlayer/layers/convolution.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tensorlayer/layers/convolution.py b/tensorlayer/layers/convolution.py index e3ff78f4c..3804edd0c 100644 --- a/tensorlayer/layers/convolution.py +++ b/tensorlayer/layers/convolution.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +import warnings import tensorflow as tf @@ -1480,6 +1481,7 @@ def __init__( def deconv2d(layer, n_filter=32, + n_out_channel=None, filter_size=(3, 3), out_size=(30, 30), strides=(2, 2), @@ -1535,8 +1537,15 @@ def deconv2d(layer, b_init_args = {} if act is None: act = tf.identity + if n_out_channel is not None: + warnings.warn("deprecated", DeprecationWarning) + logging.warning( + "DeprecationWarning: `n_out_channel` argument in tl.layers.DeConv2d is deprecated and will be removed in 1.9, please change for `n_filter`") + n_filter = n_out_channel + if len(strides) != 2: raise ValueError("len(strides) should be 2, DeConv2d and DeConv2dLayer are different.") + if tf.__version__ > '1.3': logging.info("DeConv2d %s: n_filters:%s 
strides:%s pad:%s act:%s" % (name, str(n_filter), str(strides), padding, act.__name__)) inputs = layer.outputs From 3a212df6fbb759fd231fc953e7b7359763606bd9 Mon Sep 17 00:00:00 2001 From: DEKHTIARJonathan Date: Tue, 10 Apr 2018 14:52:13 +0200 Subject: [PATCH 05/21] Deprecation warning added for Layer API change: `layer` argument changed to `prev_layer` - Issue #479 Additional Modification using the syntax super for inheritance - more pythonic --- tensorlayer/layers/binary.py | 104 +++++--- tensorlayer/layers/convolution.py | 283 +++++++++++++++------- tensorlayer/layers/core.py | 17 +- tensorlayer/layers/extend.py | 18 +- tensorlayer/layers/flow_control.py | 2 +- tensorlayer/layers/importer.py | 68 ++++-- tensorlayer/layers/merge.py | 7 +- tensorlayer/layers/normalization.py | 53 ++-- tensorlayer/layers/object_detection.py | 13 +- tensorlayer/layers/padding.py | 58 ++++- tensorlayer/layers/pooling.py | 121 ++++++--- tensorlayer/layers/recurrent.py | 72 ++++-- tensorlayer/layers/shape.py | 44 +++- tensorlayer/layers/spatial_transformer.py | 10 +- tensorlayer/layers/special_activation.py | 11 +- 15 files changed, 636 insertions(+), 245 deletions(-) diff --git a/tensorlayer/layers/binary.py b/tensorlayer/layers/binary.py index 9e1af031c..e0d5f4695 100644 --- a/tensorlayer/layers/binary.py +++ b/tensorlayer/layers/binary.py @@ -126,7 +126,8 @@ class BinaryDenseLayer(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release n_units=100, act=tf.identity, use_gemm=False, @@ -136,13 +137,18 @@ def __init__( b_init_args=None, name='binary_dense', ): + # super(BinaryDenseLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(BinaryDenseLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs if self.inputs.get_shape().ndims != 2: raise Exception("The input dimension must be rank 2, please reshape or flatten it") @@ -230,7 +236,8 @@ class BinaryConv2d(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release n_filter=32, filter_size=(3, 3), strides=(1, 1), @@ -255,18 +262,22 @@ def __init__( # data_format=None, name='binary_cnn2d', ): + # super(BinaryConv2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(BinaryConv2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - + if act is None: + act = tf.identity if use_gemm: raise Exception("TODO. 
The current version use tf.matmul for inferencing.") - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs - if act is None: - act = tf.identity logging.info("BinaryConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (self.name, n_filter, str(filter_size), str(strides), padding, act.__name__)) @@ -326,7 +337,8 @@ class TernaryDenseLayer(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release n_units=100, act=tf.identity, use_gemm=False, @@ -336,16 +348,20 @@ def __init__( b_init_args=None, name='ternary_dense', ): + # super(TernaryDenseLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(TernaryDenseLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs if self.inputs.get_shape().ndims != 2: raise Exception("The input dimension must be rank 2, please reshape or flatten it") - if use_gemm: raise Exception("TODO. The current version use tf.matmul for inferencing.") @@ -432,7 +448,8 @@ class TernaryConv2d(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release n_filter=32, filter_size=(3, 3), strides=(1, 1), @@ -457,18 +474,20 @@ def __init__( # data_format=None, name='ternary_cnn2d', ): + # super(TernaryConv2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(TernaryConv2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - + if act is None: + act = tf.identity if use_gemm: raise Exception("TODO. The current version use tf.matmul for inferencing.") - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs - if act is None: - act = tf.identity logging.info("TernaryConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (self.name, n_filter, str(filter_size), str(strides), padding, act.__name__)) @@ -535,7 +554,8 @@ class DorefaDenseLayer(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release bitW=1, bitA=3, n_units=100, @@ -547,16 +567,20 @@ def __init__( b_init_args=None, name='dorefa_dense', ): + # super(DorefaDenseLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(DorefaDenseLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs if self.inputs.get_shape().ndims != 2: raise Exception("The input dimension must be rank 2, please reshape or flatten it") - if use_gemm: raise Exception("TODO. 
The current version use tf.matmul for inferencing.") @@ -646,7 +670,8 @@ class DorefaConv2d(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release bitW=1, bitA=3, n_filter=32, @@ -673,18 +698,23 @@ def __init__( # data_format=None, name='dorefa_cnn2d', ): + # super(DorefaConv2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(DorefaConv2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} + if act is None: + act = tf.identity if use_gemm: raise Exception("TODO. The current version use tf.matmul for inferencing.") - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs - if act is None: - act = tf.identity logging.info("DorefaConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (self.name, n_filter, str(filter_size), str(strides), padding, act.__name__)) @@ -729,14 +759,19 @@ class SignLayer(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release name='sign', ): + # super(SignLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(SignLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer - Layer.__init__(self, prev_layer=prev_layer, name=name) self.inputs = prev_layer.outputs logging.info("SignLayer %s" % (self.name)) + with tf.variable_scope(name): # self.outputs = tl.act.sign(self.inputs) self.outputs = quantize(self.inputs) @@ -760,15 +795,20 @@ class ScaleLayer(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release init_scale=0.05, name='scale', ): + # super(ScaleLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(ScaleLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer - Layer.__init__(self, prev_layer=prev_layer, name=name) self.inputs = prev_layer.outputs logging.info("ScaleLayer %s: init_scale: %f" % (self.name, init_scale)) + with tf.variable_scope(name): # scale = tf.get_variable(name='scale_factor', init, trainable=True, ) scale = tf.get_variable("scale", shape=[1], initializer=tf.constant_initializer(value=init_scale)) diff --git a/tensorlayer/layers/convolution.py b/tensorlayer/layers/convolution.py index 3804edd0c..7dc871854 100644 --- a/tensorlayer/layers/convolution.py +++ b/tensorlayer/layers/convolution.py @@ -63,7 +63,8 @@ class Conv1dLayer(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release act=tf.identity, shape=(5, 1, 5), stride=1, @@ -76,6 +77,13 @@ def __init__( b_init_args=None, name='cnn1d', ): + # super(Conv1dLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(Conv1dLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if act is None: act = tf.identity if W_init_args is None: @@ -83,8 +91,6 @@ def __init__( if b_init_args is None: b_init_args = {} - 
Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs logging.info("Conv1dLayer %s: shape:%s stride:%s pad:%s act:%s" % (self.name, str(shape), str(stride), padding, act.__name__)) with tf.variable_scope(name): @@ -177,7 +183,8 @@ class Conv2dLayer(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release act=tf.identity, shape=(5, 5, 1, 100), strides=(1, 1, 1, 1), @@ -190,15 +197,20 @@ def __init__( data_format=None, name='cnn_layer', ): + # super(Conv2dLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(Conv2dLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs if act is None: act = tf.identity + logging.info("Conv2dLayer %s: shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(strides), padding, act.__name__)) with tf.variable_scope(name): @@ -296,7 +308,8 @@ class DeConv2dLayer(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release act=tf.identity, shape=(3, 3, 128, 256), output_shape=(1, 256, 256, 128), @@ -308,15 +321,20 @@ def __init__( b_init_args=None, name='decnn2d_layer', ): + # super(DeConv2dLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(DeConv2dLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs if act is None: act = tf.identity + logging.info("DeConv2dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(output_shape), str(strides), padding, act.__name__)) # logging.info(" DeConv2dLayer: Untested") @@ -376,7 +394,8 @@ class Conv3dLayer(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release act=tf.identity, shape=(2, 2, 2, 3, 32), strides=(1, 2, 2, 2, 1), @@ -387,15 +406,20 @@ def __init__( b_init_args=None, name='cnn3d_layer', ): + # super(Conv3dLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(Conv3dLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs if act is None: act = tf.identity + logging.info("Conv3dLayer %s: shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(strides), padding, act.__name__)) with tf.variable_scope(name): @@ -453,7 +477,8 @@ class DeConv3dLayer(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release act=tf.identity, shape=(2, 2, 2, 128, 256), output_shape=(1, 12, 32, 32, 128), @@ -465,15 +490,20 @@ def __init__( b_init_args=None, name='decnn3d_layer', ): 
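        # The three-line shim introduced below is the same pattern this patch
        # applies to every layer: pass both keywords up to Layer.__init__
        # (which logs the DeprecationWarning when `layer` is given), then
        # alias `layer` back onto `prev_layer` so pre-1.9 call sites keep
        # working. A minimal, hedged sketch of the idiom -- `ExampleLayer`
        # is illustrative only, not a class from this series:
        #
        #     class ExampleLayer(Layer):
        #         def __init__(self, prev_layer=None, layer=None, name='example'):
        #             super(ExampleLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name)
        #             if layer is not None:   # old keyword still accepted until 1.9
        #                 prev_layer = layer
        #             self.inputs = prev_layer.outputs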
+ # super(DeConv3dLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(DeConv3dLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs if act is None: act = tf.identity + logging.info("DeConv3dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(output_shape), str(strides), padding, act.__name__)) @@ -521,28 +551,40 @@ class UpSampling2dLayer(Layer): def __init__( self, - prev_layer, - size, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + size=list(), is_scale=True, method=0, align_corners=False, name='upsample2d_layer', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + # super(UpSampling2dLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(UpSampling2dLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + + assert isinstance(size, (list, tuple)) and len(size) == 2 + if len(self.inputs.get_shape()) == 3: if is_scale: size_h = size[0] * int(self.inputs.get_shape()[0]) size_w = size[1] * int(self.inputs.get_shape()[1]) size = [int(size_h), int(size_w)] + elif len(self.inputs.get_shape()) == 4: if is_scale: size_h = size[0] * int(self.inputs.get_shape()[1]) size_w = size[1] * int(self.inputs.get_shape()[2]) size = [int(size_h), int(size_w)] + else: raise Exception("Donot support shape %s" % self.inputs.get_shape()) + logging.info("UpSampling2dLayer %s: is_scale:%s size:%s method:%d align_corners:%s" % (name, is_scale, size, method, align_corners)) + with tf.variable_scope(name): try: self.outputs = tf.image.resize_images(self.inputs, size=size, method=method, align_corners=align_corners) @@ -581,15 +623,23 @@ class DownSampling2dLayer(Layer): def __init__( self, - prev_layer, - size, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + size=list(), is_scale=True, method=0, align_corners=False, name='downsample2d_layer', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + # super(DownSampling2dLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(DownSampling2dLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + + assert isinstance(size, (list, tuple)) and len(size) == 2 + if len(self.inputs.get_shape()) == 3: if is_scale: size_h = size[0] * int(self.inputs.get_shape()[0]) @@ -602,7 +652,9 @@ def __init__( size = [int(size_h), int(size_w)] else: raise Exception("Do not support shape %s" % self.inputs.get_shape()) + logging.info("DownSampling2dLayer %s: is_scale:%s size:%s method:%d, align_corners:%s" % (name, is_scale, size, method, align_corners)) + with tf.variable_scope(name): try: self.outputs = tf.image.resize_images(self.inputs, size=size, method=method, align_corners=align_corners) @@ -665,7 +717,8 @@ class DeformableConv2d(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release offset_layer=None, # shape=(3, 3, 1, 100), 
n_filter=32, @@ -813,8 +866,13 @@ def _tf_batch_map_offsets(inputs, offsets, grid_offset): return mapped_vals - Layer.__init__(self, prev_layer=[prev_layer, offset_layer], name=name) + # super(DeformableConv2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(DeformableConv2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + self.offset_layer = offset_layer if act is None: act = tf.identity @@ -999,28 +1057,37 @@ class AtrousConv2dLayer(Layer): """ - def __init__(self, - prev_layer, - n_filter=32, - filter_size=(3, 3), - rate=2, - act=tf.identity, - padding='SAME', - W_init=tf.truncated_normal_initializer(stddev=0.02), - b_init=tf.constant_initializer(value=0.0), - W_init_args=None, - b_init_args=None, - name='atrou2d'): + def __init__( + self, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + n_filter=32, + filter_size=(3, 3), + rate=2, + act=tf.identity, + padding='SAME', + W_init=tf.truncated_normal_initializer(stddev=0.02), + b_init=tf.constant_initializer(value=0.0), + W_init_args=None, + b_init_args=None, + name='atrou2d'): + + # super(AtrousConv2dLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(AtrousConv2dLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs if act is None: act = tf.identity + logging.info("AtrousConv2dLayer %s: n_filter:%d filter_size:%s rate:%d pad:%s act:%s" % (self.name, n_filter, filter_size, rate, padding, act.__name__)) + with tf.variable_scope(name): shape = [filter_size[0], filter_size[1], int(self.inputs.get_shape()[-1]), n_filter] filters = tf.get_variable(name='filter', shape=shape, initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) @@ -1094,27 +1161,35 @@ class _SeparableConv2dLayer(Layer): # TODO """ - def __init__(self, - prev_layer, - n_filter, - filter_size=5, - strides=(1, 1), - padding='valid', - data_format='channels_last', - dilation_rate=(1, 1), - depth_multiplier=1, - act=tf.identity, - use_bias=True, - depthwise_initializer=None, - pointwise_initializer=None, - bias_initializer=tf.zeros_initializer, - depthwise_regularizer=None, - pointwise_regularizer=None, - bias_regularizer=None, - activity_regularizer=None, - name='atrou2d'): - Layer.__init__(self, prev_layer=prev_layer, name=name) + def __init__( + self, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + n_filter=16, + filter_size=5, + strides=(1, 1), + padding='valid', + data_format='channels_last', + dilation_rate=(1, 1), + depth_multiplier=1, + act=tf.identity, + use_bias=True, + depthwise_initializer=None, + pointwise_initializer=None, + bias_initializer=tf.zeros_initializer, + depthwise_regularizer=None, + pointwise_regularizer=None, + bias_regularizer=None, + activity_regularizer=None, + name='atrou2d'): + + # super(_SeparableConv2dLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(_SeparableConv2dLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + 
self.inputs = prev_layer.outputs + if tf.__version__ > "0.12.1": raise Exception("This layer only supports for TF 1.0+") @@ -1368,7 +1443,8 @@ class Conv2d(Layer): def __init__( self, - layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release n_filter=32, filter_size=(3, 3), strides=(1, 1), @@ -1409,14 +1485,17 @@ def __init__( # data_format=data_format, # name=name) + # super(Conv2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(Conv2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - - Layer.__init__(self, prev_layer=layer, name=name) - self.inputs = layer.outputs - if act is None: act = tf.identity @@ -1453,7 +1532,7 @@ def __init__( if len(strides) != 2: raise ValueError("len(strides) should be 2, Conv2d and Conv2dLayer are different.") try: - pre_channel = int(layer.outputs.get_shape()[-1]) + pre_channel = int(prev_layer.outputs.get_shape()[-1]) except Exception: # if pre_channel is ?, it happens when using Spatial Transformer Net pre_channel = 1 logging.info("[warnings] unknow input channels, set to 1") @@ -1623,18 +1702,26 @@ class DeConv3d(Layer): """ - def __init__(self, - prev_layer, - n_filter=32, - filter_size=(3, 3, 3), - strides=(2, 2, 2), - padding='SAME', - act=tf.identity, - W_init=tf.truncated_normal_initializer(stddev=0.02), - b_init=tf.constant_initializer(value=0.0), - name='decnn3d'): - Layer.__init__(self, prev_layer=prev_layer, name=name) + def __init__( + self, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + n_filter=32, + filter_size=(3, 3, 3), + strides=(2, 2, 2), + padding='SAME', + act=tf.identity, + W_init=tf.truncated_normal_initializer(stddev=0.02), + b_init=tf.constant_initializer(value=0.0), + name='decnn3d'): + + # super(DeConv3d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(DeConv3d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + logging.info("DeConv3d %s: n_filters:%s strides:%s pad:%s act:%s" % (name, str(n_filter), str(strides), padding, act.__name__)) with tf.variable_scope(name) as vs: @@ -1718,7 +1805,8 @@ class DepthwiseConv2d(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release shape=(3, 3), strides=(1, 1), act=tf.identity, @@ -1731,23 +1819,27 @@ def __init__( b_init_args=None, name='depthwise_conv2d', ): + # super(DepthwiseConv2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(DepthwiseConv2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs - if act is None: act = tf.identity logging.info("DepthwiseConv2d %s: shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(strides), padding, act.__name__)) + try: pre_channel = int(prev_layer.outputs.get_shape()[-1]) except Exception: # if pre_channel is ?, it happens when using Spatial Transformer 
Net pre_channel = 1 - logging.info("[warnings] unknow input channels, set to 1") + logging.info("[warnings] unknown input channels, set to 1") shape = [shape[0], shape[1], pre_channel, depth_multiplier] @@ -1814,7 +1906,8 @@ class SeparableConv2d(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release n_filter=100, filter_size=(3, 3), strides=(1, 1), @@ -1845,8 +1938,13 @@ def __init__( # if b_init_args is None: # b_init_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) + # super(SeparableConv2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(SeparableConv2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + # print(self.name, n_filter, str(filter_size), str(strides), depth_multiplier, act.__name__) logging.info("SeparableConv2d %s: n_filter:%d filter_size:%s filter_size:%s depth_multiplier:%d act:%s" \ % (self.name, n_filter, str(filter_size), str(strides), depth_multiplier, act.__name__)) @@ -1915,6 +2013,7 @@ class GroupConv2d(Layer): def __init__( self, prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release n_filter=32, filter_size=(3, 3), strides=(2, 2), @@ -1927,13 +2026,19 @@ def __init__( b_init_args=None, name='groupconv', ): # Windaway + + # super(GroupConv2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(GroupConv2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs groupConv = lambda i, k: tf.nn.conv2d(i, k, strides=[1, strides[0], strides[1], 1], padding=padding) channels = int(self.inputs.get_shape()[-1]) diff --git a/tensorlayer/layers/core.py b/tensorlayer/layers/core.py index 7c28b91b4..17baee9cd 100644 --- a/tensorlayer/layers/core.py +++ b/tensorlayer/layers/core.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- import time +import warnings import numpy as np import tensorflow as tf @@ -380,7 +381,9 @@ class Layer(object): """ - def __init__(self, prev_layer=None, name=None): + # def __init__(self, prev_layer=None, name=None): + def __init__(self, prev_layer=None, layer=None, name=None): # TODO change this line for the one above for the 1.9 release + if name is None: raise ValueError('Layer must have a name.') @@ -389,6 +392,15 @@ def __init__(self, prev_layer=None, name=None): name = scope_name + '/' + name self.name = name + # TODO remove this whole block for the 1.9 release + # ==== START Deprecation warning for layer ===== + if layer is not None: + warnings.warn("deprecated", DeprecationWarning) + logging.warning("DeprecationWarning: `layer` argument in %s.%s is deprecated and will be removed in 1.9, please change for `prev_layer`" % + (self.__module__, self.__class__.__name__)) + prev_layer = layer + # ==== END Deprecation warning for layer ===== + # get all properties of previous layer(s) if isinstance(prev_layer, Layer): # 1. for normal layer have only 1 input i.e. 
DenseLayer # Hint : list(), dict() is pass by value (shallow), without them, @@ -396,12 +408,15 @@ def __init__(self, prev_layer=None, name=None): self.all_layers = list(prev_layer.all_layers) self.all_params = list(prev_layer.all_params) self.all_drop = dict(prev_layer.all_drop) + elif isinstance(prev_layer, list): # 2. for layer have multiply inputs i.e. ConcatLayer self.all_layers = list_remove_repeat(sum([l.all_layers for l in prev_layer], [])) self.all_params = list_remove_repeat(sum([l.all_params for l in prev_layer], [])) self.all_drop = dict(sum([list(l.all_drop.items()) for l in prev_layer], [])) + elif isinstance(prev_layer, tf.Tensor): raise Exception("Please use InputLayer to convert Tensor/Placeholder to TL layer") + elif prev_layer is not None: # tl.models self.all_layers = list(prev_layer.all_layers) self.all_params = list(prev_layer.all_params) diff --git a/tensorlayer/layers/extend.py b/tensorlayer/layers/extend.py index 2b5190f3c..3d348e52a 100644 --- a/tensorlayer/layers/extend.py +++ b/tensorlayer/layers/extend.py @@ -35,11 +35,16 @@ class ExpandDimsLayer(Layer): def __init__( self, - prev_layer, - axis, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + axis=0, name='expand_dims', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + # super(ExpandDimsLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(ExpandDimsLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs logging.info("ExpandDimsLayer %s: axis:%d" % (self.name, axis)) @@ -83,10 +88,15 @@ class TileLayer(Layer): def __init__( self, prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release multiples=None, name='tile', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + # super(TileLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(TileLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs logging.info("TileLayer %s: multiples:%s" % (self.name, multiples)) diff --git a/tensorlayer/layers/flow_control.py b/tensorlayer/layers/flow_control.py index c3c0afbe0..1f97fc8b4 100644 --- a/tensorlayer/layers/flow_control.py +++ b/tensorlayer/layers/flow_control.py @@ -53,7 +53,7 @@ class MultiplexerLayer(Layer): """ def __init__(self, layers, name='mux_layer'): - Layer.__init__(self, prev_layer=layers, name=name) + super(MultiplexerLayer, self).__init__(prev_layer=layers, name=name) self.n_inputs = len(layers) self.inputs = [] diff --git a/tensorlayer/layers/importer.py b/tensorlayer/layers/importer.py index efc9689e9..39c222010 100644 --- a/tensorlayer/layers/importer.py +++ b/tensorlayer/layers/importer.py @@ -55,17 +55,26 @@ class LambdaLayer(Layer): def __init__( self, - prev_layer, - fn, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + fn=None, fn_args=None, name='lambda_layer', ): + + # super(LambdaLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(LambdaLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if fn_args is None: fn_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) + assert 
prev_layer is not None assert fn is not None - self.inputs = prev_layer.outputs + logging.info("LambdaLayer %s" % self.name) with tf.variable_scope(name) as vs: self.outputs = fn(self.inputs, **fn_args) @@ -103,18 +112,25 @@ class SlimNetsLayer(Layer): def __init__( self, - prev_layer, - slim_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + slim_layer=None, slim_args=None, name='tfslim_layer', ): + + # super(SlimNetsLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(SlimNetsLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if slim_layer is None: raise ValueError("slim layer is None") if slim_args is None: slim_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs logging.info("SlimNetsLayer %s: %s" % (self.name, slim_layer.__name__)) # with tf.variable_scope(name) as vs: @@ -165,20 +181,28 @@ class KerasLayer(Layer): def __init__( self, - prev_layer, - keras_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + keras_layer=None, keras_args=None, name='keras_layer', ): + + # super(KerasLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(KerasLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if prev_layer is None: raise ValueError("layer is None") if keras_args is None: keras_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs logging.info("KerasLayer %s: %s" % (self.name, keras_layer)) - logging.info("This API will be removed, please use LambdaLayer instead.") + logging.warning("This API will be removed, please use LambdaLayer instead.") + with tf.variable_scope(name) as vs: self.outputs = keras_layer(self.inputs, **keras_args) variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) @@ -210,19 +234,27 @@ class EstimatorLayer(Layer): def __init__( self, - prev_layer, - model_fn, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + model_fn=None, args=None, name='estimator_layer', ): + # super(EstimatorLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(EstimatorLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if model_fn is None: raise ValueError('model fn is None') if args is None: args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs + logging.info("EstimatorLayer %s: %s" % (self.name, model_fn)) - logging.info("This API will be removed, please use LambdaLayer instead.") + logging.warning("This API will be removed, please use LambdaLayer instead.") + with tf.variable_scope(name) as vs: self.outputs = model_fn(self.inputs, **args) variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) diff --git a/tensorlayer/layers/merge.py b/tensorlayer/layers/merge.py index 84150a95e..4da8ed604 100644 --- a/tensorlayer/layers/merge.py +++ b/tensorlayer/layers/merge.py @@ -55,7 +55,9 @@ def __init__( concat_dim=-1, name='concat_layer', ): - Layer.__init__(self, prev_layer=layers, name=name) + + 
super(ConcatLayer, self).__init__(prev_layer=layers, name=name) + self.inputs = [] for l in layers: self.inputs.append(l.outputs) @@ -120,7 +122,8 @@ def __init__( act=None, name='elementwise_layer', ): - Layer.__init__(self, prev_layer=layers, name=name) + + super(ElementwiseLayer, self).__init__(prev_layer=layers, name=name) logging.info("ElementwiseLayer %s: size:%s fn:%s" % (self.name, layers[0].outputs.get_shape(), combine_fn.__name__)) diff --git a/tensorlayer/layers/normalization.py b/tensorlayer/layers/normalization.py index 20f42440e..f5b77e478 100644 --- a/tensorlayer/layers/normalization.py +++ b/tensorlayer/layers/normalization.py @@ -38,15 +38,21 @@ class LocalResponseNormLayer(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release depth_radius=None, bias=None, alpha=None, beta=None, name='lrn_layer', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + # super(LocalResponseNormLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(LocalResponseNormLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + logging.info("LocalResponseNormLayer %s: depth_radius: %s, bias: %s, alpha: %s, beta: %s" % (self.name, str(depth_radius), str(bias), str(alpha), str(beta))) with tf.variable_scope(name): @@ -95,7 +101,8 @@ class BatchNormLayer(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release decay=0.9, epsilon=0.00001, act=tf.identity, @@ -104,8 +111,13 @@ def __init__( gamma_init=tf.random_normal_initializer(mean=1.0, stddev=0.002), name='batchnorm_layer', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + # super(BatchNormLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(BatchNormLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + logging.info("BatchNormLayer %s: decay:%f epsilon:%f act:%s is_train:%s" % (self.name, decay, epsilon, act.__name__, is_train)) x_shape = self.inputs.get_shape() params_shape = x_shape[-1:] @@ -245,21 +257,28 @@ class LayerNormLayer(Layer): """ - def __init__(self, - prev_layer, - center=True, - scale=True, - act=tf.identity, - reuse=None, - variables_collections=None, - outputs_collections=None, - trainable=True, - begin_norm_axis=1, - begin_params_axis=-1, - name='layernorm'): + def __init__( + self, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + center=True, + scale=True, + act=tf.identity, + reuse=None, + variables_collections=None, + outputs_collections=None, + trainable=True, + begin_norm_axis=1, + begin_params_axis=-1, + name='layernorm'): + + # super(LayerNormLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(LayerNormLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer - Layer.__init__(self, prev_layer=prev_layer, name=name) self.inputs = prev_layer.outputs + logging.info("LayerNormLayer %s: act:%s" % (self.name, act.__name__)) if tf.__version__ < "1.3": diff --git a/tensorlayer/layers/object_detection.py b/tensorlayer/layers/object_detection.py index 2074dcc8d..fdbf7637f 100644 --- 
a/tensorlayer/layers/object_detection.py +++ b/tensorlayer/layers/object_detection.py @@ -34,15 +34,22 @@ class ROIPoolingLayer(Layer): def __init__( self, - prev_layer, - rois, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + rois=list(), pool_height=2, pool_width=2, name='roipooling_layer', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + # super(ROIPoolingLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(ROIPoolingLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + logging.info("ROIPoolingLayer %s: (%d, %d)" % (self.name, pool_height, pool_width)) + try: from tensorlayer.third_party.roi_pooling.roi_pooling.roi_pooling_ops import roi_pooling except Exception as e: diff --git a/tensorlayer/layers/padding.py b/tensorlayer/layers/padding.py index 6da27cdd8..12a497581 100644 --- a/tensorlayer/layers/padding.py +++ b/tensorlayer/layers/padding.py @@ -37,15 +37,22 @@ class PadLayer(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release padding=None, mode='CONSTANT', name='pad_layer', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + # super(PadLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(PadLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if padding is None: raise Exception("padding should be a Tensor of type int32. see https://www.tensorflow.org/api_docs/python/tf/pad") - self.inputs = prev_layer.outputs + logging.info("PadLayer %s: padding:%s mode:%s" % (self.name, list(padding), mode)) self.outputs = tf.pad(self.inputs, paddings=padding, mode=mode, name=name) self.all_layers.append(self.outputs) @@ -69,13 +76,22 @@ class ZeroPad1d(Layer): def __init__( self, - prev_layer, - padding, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + padding=1, name='zeropad1d', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + # super(ZeroPad1d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(ZeroPad1d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + logging.info("ZeroPad1d %s: padding:%s" % (self.name, str(padding))) + + assert isinstance(padding, (int, tuple, dict)) + self.outputs = tf.keras.layers.ZeroPadding1D(padding=padding, name=name)(self.inputs) self.all_layers.append(self.outputs) @@ -99,13 +115,22 @@ class ZeroPad2d(Layer): def __init__( self, - prev_layer, - padding, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + padding=1, name='zeropad2d', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + # super(ZeroPad2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(ZeroPad2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + logging.info("ZeroPad2d %s: padding:%s" % (self.name, str(padding))) + + assert isinstance(padding, (int, tuple)) + self.outputs = tf.keras.layers.ZeroPadding2D(padding=padding, 
name=name)(self.inputs) self.all_layers.append(self.outputs) @@ -129,12 +154,21 @@ class ZeroPad3d(Layer): def __init__( self, - prev_layer, - padding, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + padding=1, name='zeropad3d', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + # super(ZeroPad3d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(ZeroPad3d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + logging.info("ZeroPad3d %s: padding:%s" % (self.name, str(padding))) + + assert isinstance(padding, (int, tuple)) + self.outputs = tf.keras.layers.ZeroPadding3D(padding=padding, name=name)(self.inputs) self.all_layers.append(self.outputs) diff --git a/tensorlayer/layers/pooling.py b/tensorlayer/layers/pooling.py index 97df8e79f..3442dc434 100644 --- a/tensorlayer/layers/pooling.py +++ b/tensorlayer/layers/pooling.py @@ -57,15 +57,22 @@ class PoolLayer(Layer): def __init__( self, prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='SAME', pool=tf.nn.max_pool, name='pool_layer', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + # super(PoolLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(PoolLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + logging.info("PoolLayer %s: ksize:%s strides:%s padding:%s pool:%s" % (self.name, str(ksize), str(strides), padding, pool.__name__)) + self.outputs = pool(self.inputs, ksize=ksize, strides=strides, padding=padding, name=name) self.all_layers.append(self.outputs) @@ -173,9 +180,12 @@ def maxpool2d(net, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='max net_new.all_layers.extend([outputs]) return net_new else: + assert len(strides) == 2, "len(strides) should be 2, MaxPool2d and PoolLayer are different." + net = PoolLayer( net, ksize=[1, filter_size[0], filter_size[1], 1], strides=[1, strides[0], strides[1], 1], padding=padding, pool=tf.nn.max_pool, name=name) + return net @@ -210,7 +220,9 @@ def meanpool2d(net, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='me net_new.all_layers.extend([outputs]) return net_new else: + assert len(strides) == 2, "len(strides) should be 2, MeanPool2d and PoolLayer are different." 
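            # The assert above mirrors the one just added to maxpool2d: the 2d
            # wrappers take the 2-element (height, width) strides, while the
            # underlying PoolLayer receives the expanded 4-element NHWC lists
            # built below. A hedged usage sketch (`net` is any 4-D layer, not
            # defined in this hunk):
            #
            #     net = tl.layers.MeanPool2d(net, filter_size=(3, 3), strides=(2, 2))  # ok
            #     net = tl.layers.MeanPool2d(net, strides=(1, 2, 2, 1))                # AssertionError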
+ net = PoolLayer( net, ksize=[1, filter_size[0], filter_size[1], 1], strides=[1, strides[0], strides[1], 1], padding=padding, pool=tf.nn.avg_pool, name=name) return net @@ -245,12 +257,24 @@ class MaxPool3d(Layer): """ - def __init__(self, prev_layer, filter_size=(3, 3, 3), strides=(2, 2, 2), padding='valid', data_format='channels_last', name='maxpool3d'): - # check layer name (fixed) - Layer.__init__(self, prev_layer=prev_layer, name=name) - # the input of this layer is the output of previous layer (fixed) + def __init__( + self, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + filter_size=(3, 3, 3), + strides=(2, 2, 2), + padding='valid', + data_format='channels_last', + name='maxpool3d'): + # super(MaxPool3d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(MaxPool3d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + logging.info("MaxPool3d %s: filter_size:%s strides:%s padding:%s" % (name, str(filter_size), str(strides), str(padding))) + self.outputs = tf.layers.max_pooling3d(prev_layer.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name) # update layer (customized) self.all_layers.append(self.outputs) @@ -285,13 +309,26 @@ class MeanPool3d(Layer): """ - def __init__(self, prev_layer, filter_size=(3, 3, 3), strides=(2, 2, 2), padding='valid', data_format='channels_last', name='meanpool3d'): - # check layer name (fixed) - Layer.__init__(self, prev_layer=prev_layer, name=name) - # the input of this layer is the output of previous layer (fixed) + def __init__( + self, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + filter_size=(3, 3, 3), + strides=(2, 2, 2), + padding='valid', + data_format='channels_last', + name='meanpool3d'): + + # super(MeanPool3d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(MeanPool3d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + # print out info (customized) logging.info("MeanPool3d %s: filter_size:%s strides:%s padding:%s" % (name, str(filter_size), str(strides), str(padding))) + # operation (customized) self.outputs = tf.layers.average_pooling3d(prev_layer.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name) # update layer (customized) @@ -319,14 +356,19 @@ class GlobalMaxPool1d(Layer): def __init__( self, prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release name='globalmaxpool1d', ): - # check layer name (fixed) - Layer.__init__(self, prev_layer=prev_layer, name=name) - # the input of this layer is the output of previous layer (fixed) + # super(GlobalMaxPool1d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(GlobalMaxPool1d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + # print out info (customized) logging.info("GlobalMaxPool1d %s" % name) + # operation (customized) self.outputs = tf.reduce_max(prev_layer.outputs, axis=1, name=name) # update layer (customized) @@ -354,14 +396,19 @@ class GlobalMeanPool1d(Layer): def __init__( self, prev_layer=None, + layer=None, # TODO remove this line for the 1.9 
release name='globalmeanpool1d', ): - # check layer name (fixed) - Layer.__init__(self, prev_layer=prev_layer, name=name) - # the input of this layer is the output of previous layer (fixed) + # super(GlobalMeanPool1d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(GlobalMeanPool1d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + # print out info (customized) logging.info("GlobalMeanPool1d %s" % name) + # operation (customized) self.outputs = tf.reduce_mean(prev_layer.outputs, axis=1, name=name) # update layer (customized) @@ -389,14 +436,19 @@ class GlobalMaxPool2d(Layer): def __init__( self, prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release name='globalmaxpool2d', ): - # check layer name (fixed) - Layer.__init__(self, prev_layer=prev_layer, name=name) - # the input of this layer is the output of previous layer (fixed) + # super(GlobalMaxPool2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(GlobalMaxPool2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + # print out info (customized) logging.info("GlobalMaxPool2d %s" % name) + # operation (customized) self.outputs = tf.reduce_max(prev_layer.outputs, axis=[1, 2], name=name) # update layer (customized) @@ -424,14 +476,19 @@ class GlobalMeanPool2d(Layer): def __init__( self, prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release name='globalmeanpool2d', ): - # check layer name (fixed) - Layer.__init__(self, prev_layer=prev_layer, name=name) - # the input of this layer is the output of previous layer (fixed) + # super(GlobalMeanPool2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(GlobalMeanPool2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + # print out info (customized) logging.info("GlobalMeanPool2d %s" % name) + # operation (customized) self.outputs = tf.reduce_mean(prev_layer.outputs, axis=[1, 2], name=name) # update layer (customized) @@ -459,14 +516,19 @@ class GlobalMaxPool3d(Layer): def __init__( self, prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release name='globalmaxpool3d', ): - # check layer name (fixed) - Layer.__init__(self, prev_layer=prev_layer, name=name) - # the input of this layer is the output of previous layer (fixed) + # super(GlobalMaxPool3d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(GlobalMaxPool3d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + # print out info (customized) logging.info("GlobalMaxPool3d %s" % name) + # operation (customized) self.outputs = tf.reduce_max(prev_layer.outputs, axis=[1, 2, 3], name=name) # update layer (customized) @@ -494,14 +556,19 @@ class GlobalMeanPool3d(Layer): def __init__( self, prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release name='globalmeanpool3d', ): - # check layer name (fixed) - Layer.__init__(self, prev_layer=prev_layer, name=name) - # the input of this layer is the output of previous 
layer (fixed) + # super(GlobalMeanPool3d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(GlobalMeanPool3d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + # print out info (customized) logging.info("GlobalMeanPool3d %s" % name) + # operation (customized) self.outputs = tf.reduce_mean(prev_layer.outputs, axis=[1, 2, 3], name=name) # update layer (customized) diff --git a/tensorlayer/layers/recurrent.py b/tensorlayer/layers/recurrent.py index 5d6208e46..3b7e1c817 100644 --- a/tensorlayer/layers/recurrent.py +++ b/tensorlayer/layers/recurrent.py @@ -132,8 +132,9 @@ class RNNLayer(Layer): def __init__( self, - prev_layer, - cell_fn, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + cell_fn=None, cell_init_args=None, n_hidden=100, initializer=tf.random_uniform_initializer(-0.1, 0.1), @@ -143,10 +144,15 @@ def __init__( return_seq_2d=False, name='rnn', ): + # super(RNNLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(RNNLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if cell_init_args is None: cell_init_args = {} - - Layer.__init__(self, prev_layer=prev_layer, name=name) if cell_fn is None: raise Exception("Please put in cell_fn") if 'GRU' in cell_fn.__name__: @@ -155,8 +161,6 @@ def __init__( except Exception: logging.warning('pop state_is_tuple fails.') - self.inputs = prev_layer.outputs - logging.info("RNNLayer %s: n_hidden:%d n_steps:%d in_dim:%d in_shape:%s cell_fn:%s " % (self.name, n_hidden, n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__)) # You can get the dimension by .get_shape() or ._shape, and check the @@ -319,8 +323,9 @@ class BiRNNLayer(Layer): def __init__( self, - prev_layer, - cell_fn, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + cell_fn=None, cell_init_args=None, n_hidden=100, initializer=tf.random_uniform_initializer(-0.1, 0.1), @@ -333,10 +338,15 @@ def __init__( return_seq_2d=False, name='birnn', ): + # super(BiRNNLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(BiRNNLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if cell_init_args is None: cell_init_args = {'state_is_tuple': True} # 'use_peepholes': True, - - Layer.__init__(self, prev_layer=prev_layer, name=name) if cell_fn is None: raise Exception("Please put in cell_fn") if 'GRU' in cell_fn.__name__: @@ -345,8 +355,6 @@ def __init__( except Exception: logging.warning("pop state_is_tuple fails.") - self.inputs = prev_layer.outputs - logging.info("BiRNNLayer %s: n_hidden:%d n_steps:%d in_dim:%d in_shape:%s cell_fn:%s dropout:%s n_layer:%d " % (self.name, n_hidden, n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(), @@ -689,7 +697,8 @@ class ConvLSTMLayer(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release cell_shape=None, feature_map=1, filter_size=(3, 3), @@ -701,8 +710,13 @@ def __init__( return_seq_2d=False, name='convlstm', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + # 
super(ConvLSTMLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(ConvLSTMLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + logging.info("ConvLSTMLayer %s: feature_map:%d, n_steps:%d, " "in_dim:%d %s, cell_fn:%s " % (self.name, feature_map, n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__)) # You can get the dimension by .get_shape() or ._shape, and check the @@ -1018,8 +1032,9 @@ class DynamicRNNLayer(Layer): def __init__( self, - prev_layer, - cell_fn, #tf.nn.rnn_cell.LSTMCell, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + cell_fn=None, #tf.nn.rnn_cell.LSTMCell, cell_init_args=None, n_hidden=256, initializer=tf.random_uniform_initializer(-0.1, 0.1), @@ -1032,14 +1047,19 @@ def __init__( dynamic_rnn_init_args=None, name='dyrnn', ): + # super(DynamicRNNLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(DynamicRNNLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if dynamic_rnn_init_args is None: dynamic_rnn_init_args = {} if cell_init_args is None: cell_init_args = {'state_is_tuple': True} if return_last is None: return_last = True - - Layer.__init__(self, prev_layer=prev_layer, name=name) if cell_fn is None: raise Exception("Please put in cell_fn") if 'GRU' in cell_fn.__name__: @@ -1047,7 +1067,6 @@ def __init__( cell_init_args.pop('state_is_tuple') except Exception: logging.warning("pop state_is_tuple fails.") - self.inputs = prev_layer.outputs logging.info("DynamicRNNLayer %s: n_hidden:%d, in_dim:%d in_shape:%s cell_fn:%s dropout:%s n_layer:%d" % (self.name, n_hidden, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__, dropout, n_layer)) @@ -1271,8 +1290,9 @@ class BiDynamicRNNLayer(Layer): def __init__( self, - prev_layer, - cell_fn, #tf.nn.rnn_cell.LSTMCell, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + cell_fn=None, #tf.nn.rnn_cell.LSTMCell, cell_init_args=None, n_hidden=256, initializer=tf.random_uniform_initializer(-0.1, 0.1), @@ -1286,12 +1306,17 @@ def __init__( dynamic_rnn_init_args=None, name='bi_dyrnn_layer', ): + # super(BiDynamicRNNLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(BiDynamicRNNLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if cell_init_args is None: cell_init_args = {'state_is_tuple': True} if dynamic_rnn_init_args is None: dynamic_rnn_init_args = {} - - Layer.__init__(self, prev_layer=prev_layer, name=name) if cell_fn is None: raise Exception("Please put in cell_fn") if 'GRU' in cell_fn.__name__: @@ -1299,7 +1324,6 @@ def __init__( cell_init_args.pop('state_is_tuple') except Exception: logging.warning("pop state_is_tuple fails.") - self.inputs = prev_layer.outputs logging.info("BiDynamicRNNLayer %s: n_hidden:%d in_dim:%d in_shape:%s cell_fn:%s dropout:%s n_layer:%d" % (self.name, n_hidden, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__, dropout, n_layer)) diff --git a/tensorlayer/layers/shape.py b/tensorlayer/layers/shape.py index 71a2fa1e9..b76ae5919 100644 --- 
a/tensorlayer/layers/shape.py +++ b/tensorlayer/layers/shape.py @@ -36,11 +36,17 @@ class FlattenLayer(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release name='flatten', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + # super(FlattenLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(FlattenLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + self.outputs = flatten_reshape(self.inputs, name=name) self.n_units = int(self.outputs.get_shape()[-1]) logging.info("FlattenLayer %s: %d" % (self.name, self.n_units)) @@ -71,14 +77,24 @@ class ReshapeLayer(Layer): def __init__( self, - prev_layer, - shape, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + shape=list(), name='reshape', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + # super(ReshapeLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(ReshapeLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs - self.outputs = tf.reshape(self.inputs, shape=shape, name=name) + logging.info("ReshapeLayer %s: %s" % (self.name, self.outputs.get_shape())) + + if shape: + raise ValueError("Shape list can not be empty") + + self.outputs = tf.reshape(self.inputs, shape=shape, name=name) self.all_layers.append(self.outputs) @@ -107,12 +123,18 @@ class TransposeLayer(Layer): def __init__( self, - prev_layer, - perm, - name='transpose', - ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + perm=None, + name='transpose'): + + # super(TransposeLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(TransposeLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + assert perm is not None logging.info("TransposeLayer %s: perm:%s" % (self.name, perm)) diff --git a/tensorlayer/layers/spatial_transformer.py b/tensorlayer/layers/spatial_transformer.py index 093ce5c91..22d7167dc 100644 --- a/tensorlayer/layers/spatial_transformer.py +++ b/tensorlayer/layers/spatial_transformer.py @@ -227,15 +227,21 @@ class SpatialTransformer2dAffineLayer(Layer): def __init__( self, prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release theta_layer=None, out_size=None, name='sapatial_trans_2d_affine', ): + # super(SpatialTransformer2dAffineLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(SpatialTransformer2dAffineLayer, self).__init__(prev_layer=[prev_layer, theta_layer], layer=[layer, theta_layer], name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + if out_size is None: out_size = [40, 40] - Layer.__init__(self, prev_layer=[prev_layer, theta_layer], name=name) - self.inputs = prev_layer.outputs self.theta_layer = theta_layer logging.info("SpatialTransformer2dAffineLayer %s: in_size:%s out_size:%s" % (name, self.inputs.get_shape().as_list(), out_size)) diff --git a/tensorlayer/layers/special_activation.py 
b/tensorlayer/layers/special_activation.py index 3aa50b1a0..bc01af2e3 100644 --- a/tensorlayer/layers/special_activation.py +++ b/tensorlayer/layers/special_activation.py @@ -35,17 +35,24 @@ class PReluLayer(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release channel_shared=False, a_init=tf.constant_initializer(value=0.0), a_init_args=None, # restore = True, name="prelu_layer"): + if a_init_args is None: a_init_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) + # super(PReluLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(PReluLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + self.inputs = prev_layer.outputs + logging.info("PReluLayer %s: channel_shared:%s" % (self.name, channel_shared)) if channel_shared: w_shape = (1, ) From 6029e085a8613e05f6f48eaac904864cd20676b3 Mon Sep 17 00:00:00 2001 From: DEKHTIARJonathan Date: Tue, 10 Apr 2018 15:19:50 +0200 Subject: [PATCH 06/21] test layers extend with argument names precised --- tests/test_layers_extend.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_layers_extend.py b/tests/test_layers_extend.py index d7469f5dc..eaf26b4a7 100644 --- a/tests/test_layers_extend.py +++ b/tests/test_layers_extend.py @@ -7,13 +7,13 @@ n = tl.layers.DenseLayer(n, 100, name='d1') n = tl.layers.DenseLayer(n, 100, name='d2') -n = tl.layers.ExpandDimsLayer(n, 2) +n = tl.layers.ExpandDimsLayer(n, axis=2) print(n) shape = n.outputs.get_shape().as_list() if shape[-1] != 1: raise Exception("shape dont match") -n = tl.layers.TileLayer(n, [-1, 1, 3]) +n = tl.layers.TileLayer(n, multiples=[-1, 1, 3]) print(n) shape = n.outputs.get_shape().as_list() if shape[-1] != 3: From 1b1254cdfd478f4ca1c6ffe848b03b34da0335a6 Mon Sep 17 00:00:00 2001 From: DEKHTIARJonathan Date: Tue, 10 Apr 2018 15:20:39 +0200 Subject: [PATCH 07/21] tl.layers.core.py forgotten Classes with deprecation --- tensorlayer/layers/core.py | 57 +++++++++++++++++++++++++++++--------- 1 file changed, 44 insertions(+), 13 deletions(-) diff --git a/tensorlayer/layers/core.py b/tensorlayer/layers/core.py index 17baee9cd..f846915bb 100644 --- a/tensorlayer/layers/core.py +++ b/tensorlayer/layers/core.py @@ -382,7 +382,11 @@ class Layer(object): """ # def __init__(self, prev_layer=None, name=None): - def __init__(self, prev_layer=None, layer=None, name=None): # TODO change this line for the one above for the 1.9 release + def __init__( + self, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release + name=None): if name is None: raise ValueError('Layer must have a name.') @@ -919,7 +923,8 @@ class DenseLayer(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release n_units=100, act=tf.identity, W_init=tf.truncated_normal_initializer(stddev=0.1), @@ -928,19 +933,26 @@ def __init__( b_init_args=None, name='dense', ): + # super(DenseLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(DenseLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + + self.inputs = prev_layer.outputs + self.n_units = n_units + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) 
- self.inputs = prev_layer.outputs if self.inputs.get_shape().ndims != 2: raise Exception("The input dimension must be rank 2, please reshape or flatten it") n_in = int(self.inputs.get_shape()[-1]) - self.n_units = n_units + logging.info("DenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__)) + with tf.variable_scope(name): W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) if b_init is not None: @@ -1007,13 +1019,16 @@ class ReconLayer(DenseLayer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release x_recon=None, n_units=784, act=tf.nn.softplus, name='recon', ): - DenseLayer.__init__(self, prev_layer=prev_layer, n_units=n_units, act=act, name=name) + # super(ReconLayer, self).__init__(prev_layer=prev_layer, name=name, n_units=n_units, act=act, name=name) # TODO replace the line below with this line for the 1.9 release + super(ReconLayer, self).__init__(prev_layer=prev_layer, layer=layer, n_units=n_units, act=act, name=name) + logging.info("%s is a ReconLayer" % self.name) # y : reconstruction outputs; train_params : parameters to train @@ -1211,14 +1226,19 @@ class DropoutLayer(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release keep=0.5, is_fix=False, is_train=True, seed=None, name='dropout_layer', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + # super(DropoutLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(DropoutLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + if is_train is False: logging.info(" skip DropoutLayer") self.outputs = prev_layer.outputs @@ -1294,14 +1314,19 @@ class GaussianNoiseLayer(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release mean=0.0, stddev=1.0, is_train=True, seed=None, name='gaussian_noise_layer', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + # super(GaussianNoiseLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(GaussianNoiseLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + if is_train is False: logging.info(" skip GaussianNoiseLayer") self.outputs = prev_layer.outputs @@ -1367,7 +1392,8 @@ class DropconnectDenseLayer(Layer): def __init__( self, - prev_layer, + prev_layer=None, + layer=None, # TODO remove this line for the 1.9 release keep=0.5, n_units=100, act=tf.identity, @@ -1377,13 +1403,18 @@ def __init__( b_init_args=None, name='dropconnect_layer', ): + # super(DropconnectDenseLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release + super(DropconnectDenseLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) + if layer is not None: + prev_layer = layer + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) self.inputs = prev_layer.outputs + if self.inputs.get_shape().ndims != 2: raise Exception("The input dimension must be rank 2") n_in = int(self.inputs.get_shape()[-1]) From d4f1c3af552fe2d757c4862932c93a2c1fd6c143 Mon Sep 17 00:00:00 2001 From: DEKHTIARJonathan Date: Tue, 10 Apr 2018 16:07:28 +0200 
Subject: [PATCH 08/21] Error fix in deprecation warning tl.layers.spatial_transformer.py --- tensorlayer/layers/core.py | 1 - tensorlayer/layers/spatial_transformer.py | 18 +++++++++++++----- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/tensorlayer/layers/core.py b/tensorlayer/layers/core.py index f846915bb..ae78583a1 100644 --- a/tensorlayer/layers/core.py +++ b/tensorlayer/layers/core.py @@ -381,7 +381,6 @@ class Layer(object): """ - # def __init__(self, prev_layer=None, name=None): def __init__( self, prev_layer=None, diff --git a/tensorlayer/layers/spatial_transformer.py b/tensorlayer/layers/spatial_transformer.py index 22d7167dc..ed3e3fe02 100644 --- a/tensorlayer/layers/spatial_transformer.py +++ b/tensorlayer/layers/spatial_transformer.py @@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- +import warnings + import numpy as np import tensorflow as tf from six.moves import xrange @@ -230,19 +232,25 @@ def __init__( layer=None, # TODO remove this line for the 1.9 release theta_layer=None, out_size=None, - name='sapatial_trans_2d_affine', + name='spatial_trans_2d_affine', ): - # super(SpatialTransformer2dAffineLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(SpatialTransformer2dAffineLayer, self).__init__(prev_layer=[prev_layer, theta_layer], layer=[layer, theta_layer], name=name) if layer is not None: - prev_layer = layer + # TODO remove the whole block for the 1.9 release + warnings.warn("deprecated", DeprecationWarning) + logging.warning("DeprecationWarning: `layer` argument in %s.%s is deprecated and will be removed in 1.9, please change for `prev_layer`" % + (self.__module__, self.__class__.__name__)) + + if layer is not None: + prev_layer = layer + + super(SpatialTransformer2dAffineLayer, self).__init__(prev_layer=[prev_layer, theta_layer], name=name) self.inputs = prev_layer.outputs + self.theta_layer = theta_layer if out_size is None: out_size = [40, 40] - self.theta_layer = theta_layer logging.info("SpatialTransformer2dAffineLayer %s: in_size:%s out_size:%s" % (name, self.inputs.get_shape().as_list(), out_size)) with tf.variable_scope(name) as vs: From 8730a25840e14c55966f4e8ee5a657c1012fbf3c Mon Sep 17 00:00:00 2001 From: DEKHTIARJonathan Date: Tue, 10 Apr 2018 16:07:59 +0200 Subject: [PATCH 09/21] Test refactored and fix with arguments missing added --- tests/test_layers_basic.py | 14 ++-- tests/test_layers_convolution.py | 48 ++++++------- tests/test_layers_core.py | 66 +++++++++--------- tests/test_layers_extend.py | 12 ++-- tests/test_layers_flow_control.py | 6 +- tests/test_layers_importer.py | 2 +- tests/test_layers_merge.py | 40 +++++------ tests/test_layers_normalization.py | 6 +- tests/test_layers_padding.py | 40 +++++------ tests/test_layers_pooling.py | 40 +++++------ tests/test_layers_recurrent.py | 87 ++++++++++++------------ tests/test_layers_shape.py | 26 +++---- tests/test_layers_spatial_transformer.py | 31 ++++----- tests/test_layers_special_activation.py | 16 ++--- tests/test_layers_stack.py | 16 ++--- tests/test_layers_super_resolution.py | 16 ++--- tests/test_layers_time_distributed.py | 4 +- tests/test_mnist_simple.py | 4 +- tests/test_models.py | 14 ++-- 19 files changed, 244 insertions(+), 244 deletions(-) diff --git a/tests/test_layers_basic.py b/tests/test_layers_basic.py index 1c246a550..09f4fd2c9 100644 --- a/tests/test_layers_basic.py +++ b/tests/test_layers_basic.py @@ -3,25 +3,25 @@ x = tf.placeholder(tf.float32, [None, 100]) n = tl.layers.InputLayer(x, 
name='in') -n = tl.layers.DenseLayer(n, 80, name='d1') -n = tl.layers.DenseLayer(n, 80, name='d2') +n = tl.layers.DenseLayer(n, n_units=80, name='d1') +n = tl.layers.DenseLayer(n, n_units=80, name='d2') print(n) n.print_layers() n.print_params(False) print(n.count_params()) if n.count_params() != 14560: - raise Exception("params dont match") + raise Exception("params do not match") shape = n.outputs.get_shape().as_list() if shape[-1] != 80: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(n.all_layers) != 2: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(n.all_params) != 4: - raise Exception("params dont match") + raise Exception("params do not match") for l in n: print(l) @@ -32,7 +32,7 @@ shape = n2.outputs.get_shape().as_list() if shape[-1] != 30: - raise Exception("shape dont match") + raise Exception("shape do not match") for l in n2: print(l) diff --git a/tests/test_layers_convolution.py b/tests/test_layers_convolution.py index 830a23090..b5560352d 100644 --- a/tests/test_layers_convolution.py +++ b/tests/test_layers_convolution.py @@ -8,13 +8,13 @@ n = tl.layers.Conv1dLayer(nin, shape=(5, 1, 32), stride=2) shape = n.outputs.get_shape().as_list() if (shape[1] != 50) or (shape[2] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") n = tl.layers.Conv1d(nin, n_filter=32, filter_size=5, stride=2) print(n) shape = n.outputs.get_shape().as_list() if (shape[1] != 50) or (shape[2] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") # AtrousConv1dLayer @@ -33,29 +33,29 @@ print(n) shape = n.outputs.get_shape().as_list() if (shape[1] != 50) or (shape[2] != 50) or (shape[3] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") -n = tl.layers.Conv2d(nin, 32, (3, 3), (2, 2), act=None, name='conv2d') +n = tl.layers.Conv2d(nin, n_filter=32, filter_size=(3, 3), strides=(2, 2), act=None, name='conv2d') shape = n.outputs.get_shape().as_list() if (shape[1] != 50) or (shape[2] != 50) or (shape[3] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") n.print_params(False) if len(n.all_params) != 2: - raise Exception("params dont match") + raise Exception("params do not match") -n = tl.layers.Conv2d(nin, 32, (3, 3), (2, 2), act=tf.nn.relu, b_init=None, name='conv2d_no_bias') +n = tl.layers.Conv2d(nin, n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, b_init=None, name='conv2d_no_bias') print(n) shape = n.outputs.get_shape().as_list() if (shape[1] != 50) or (shape[2] != 50) or (shape[3] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") if len(n.all_params) != 1: - raise Exception("params dont match") + raise Exception("params do not match") n = tl.layers.DeConv2dLayer(nin, shape=(5, 5, 32, 3), output_shape=(100, 200, 200, 32), strides=(1, 2, 2, 1), name='deconv2dlayer') print(n) shape = n.outputs.get_shape().as_list() if (shape[1] != 200) or (shape[2] != 200) or (shape[3] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") print(nin.outputs) n = tl.layers.DeConv2d(nin, n_filter=32, filter_size=(3, 3), strides=(2, 2), name='DeConv2d') @@ -63,20 +63,20 @@ shape = n.outputs.get_shape().as_list() # if (shape[1] != 200) or (shape[2] != 200) or (shape[3] != 32): # TODO: why [None None None 32] ? 
if (shape[3] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") -n = tl.layers.DepthwiseConv2d(nin, (3, 3), (2, 2), tf.nn.relu, depth_multiplier=2, name='depthwise') +n = tl.layers.DepthwiseConv2d(nin, shape=(3, 3), strides=(2, 2), act=tf.nn.relu, depth_multiplier=2, name='depthwise') print(n) shape = n.outputs.get_shape().as_list() if (shape[1] != 50) or (shape[2] != 50) or (shape[3] != 6): - raise Exception("shape dont match") + raise Exception("shape do not match") -n = tl.layers.Conv2d(nin, 32, (3, 3), (2, 2), act=tf.nn.relu, name='conv2d2') -n = tl.layers.GroupConv2d(n, 32, (3, 3), (2, 2), name='group') +n = tl.layers.Conv2d(nin, n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, name='conv2d2') +n = tl.layers.GroupConv2d(n, n_filter=32, filter_size=(3, 3), strides=(2, 2), name='group') print(n) shape = n.outputs.get_shape().as_list() if (shape[1] != 25) or (shape[2] != 25) or (shape[3] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") # n = UpSampling2dLayer # n = DownSampling2dLayer @@ -90,22 +90,22 @@ # AtrousConv2dLayer -n = tl.layers.SeparableConv2d(nin, 32, (3, 3), (1, 1), tf.nn.relu, name='seperable1') +n = tl.layers.SeparableConv2d(nin, n_filter=32, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, name='seperable1') n.print_layers() n.print_params(False) shape = n.outputs.get_shape().as_list() if shape[1:] != [98, 98, 32]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(n.all_layers) != 1: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(n.all_params) != 3: - raise Exception("params dont match") + raise Exception("params do not match") if n.count_params() != 155: - raise Exception("params dont match") + raise Exception("params do not match") # exit() ## 3D @@ -116,14 +116,14 @@ print(n) shape = n.outputs.get_shape().as_list() if (shape[1] != 50) or (shape[2] != 50) or (shape[3] != 50) or (shape[4] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") # n = tl.layers.DeConv3dLayer(nin, shape=(2, 2, 2, 128, 3), output_shape=(100, 12, 32, 32, 128), strides=(1, 2, 2, 2, 1)) # print(n) # shape = n.outputs.get_shape().as_list() -n = tl.layers.DeConv3d(nin, 32, (3, 3, 3), (2, 2, 2)) +n = tl.layers.DeConv3d(nin, n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2)) shape = n.outputs.get_shape().as_list() print(shape) if (shape[1] != 200) or (shape[2] != 200) or (shape[3] != 200) or (shape[4] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") diff --git a/tests/test_layers_core.py b/tests/test_layers_core.py index e0410c0f6..53f0d8a65 100644 --- a/tests/test_layers_core.py +++ b/tests/test_layers_core.py @@ -4,23 +4,23 @@ ## DenseLayer x = tf.placeholder(tf.float32, shape=[None, 30]) net = tl.layers.InputLayer(x, name='input') -net = tl.layers.DenseLayer(net, 10, name='dense') +net = tl.layers.DenseLayer(net, n_units=10, name='dense') net.print_layers() net.print_params(False) shape = net.outputs.get_shape().as_list() if shape[-1] != 10: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 1: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 2: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 310: - raise Exception("params dont match") + raise Exception("params do not match") ## OneHotInputLayer x = 
tf.placeholder(tf.int32, shape=[None]) @@ -32,16 +32,16 @@ shape = net.outputs.get_shape().as_list() if shape[-1] != 8: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 0: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 0: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 0: - raise Exception("params dont match") + raise Exception("params do not match") ## Word2vecEmbeddingInputlayer batch_size = 8 @@ -57,16 +57,16 @@ shape = net.outputs.get_shape().as_list() if shape != [8, 200]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 1: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 3: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 401000: - raise Exception("params dont match") + raise Exception("params do not match") ## EmbeddingInputlayer batch_size = 8 @@ -78,16 +78,16 @@ shape = net.outputs.get_shape().as_list() if shape != [batch_size, 50]: # (8, 50) - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 1: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 1: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 50000: - raise Exception("params dont match") + raise Exception("params do not match") ## AverageEmbeddingInputlayer batch_size = 8 @@ -100,16 +100,16 @@ shape = net.outputs.get_shape().as_list() if shape != [batch_size, 50]: # (8, 50) - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 1: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 1: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 50000: - raise Exception("params dont match") + raise Exception("params do not match") ## ReconLayer x = tf.placeholder(tf.float32, shape=(None, 784)) @@ -126,16 +126,16 @@ shape = net.outputs.get_shape().as_list() if shape[-1] != 784: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 2: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 4: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 308308: - raise Exception("params dont match") + raise Exception("params do not match") ## GaussianNoiseLayer x = tf.placeholder(tf.float32, shape=(64, 784)) @@ -148,16 +148,16 @@ shape = net.outputs.get_shape().as_list() if shape != [64, 100]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 2: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 2: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 78500: - raise Exception("params dont match") + raise Exception("params do not match") ## DropconnectDenseLayer x = tf.placeholder(tf.float32, shape=(64, 784)) @@ -170,13 +170,13 @@ shape = net.outputs.get_shape().as_list() if shape != [64, 100]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 
2: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 4: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 88600: - raise Exception("params dont match") + raise Exception("params do not match") diff --git a/tests/test_layers_extend.py b/tests/test_layers_extend.py index eaf26b4a7..56f512e37 100644 --- a/tests/test_layers_extend.py +++ b/tests/test_layers_extend.py @@ -4,25 +4,25 @@ ## 1D x = tf.placeholder(tf.float32, (None, 100)) n = tl.layers.InputLayer(x, name='in') -n = tl.layers.DenseLayer(n, 100, name='d1') -n = tl.layers.DenseLayer(n, 100, name='d2') +n = tl.layers.DenseLayer(n, n_units=100, name='d1') +n = tl.layers.DenseLayer(n, n_units=100, name='d2') n = tl.layers.ExpandDimsLayer(n, axis=2) print(n) shape = n.outputs.get_shape().as_list() if shape[-1] != 1: - raise Exception("shape dont match") + raise Exception("shape do not match") n = tl.layers.TileLayer(n, multiples=[-1, 1, 3]) print(n) shape = n.outputs.get_shape().as_list() if shape[-1] != 3: - raise Exception("shape dont match") + raise Exception("shape do not match") n.print_layers() n.print_params(False) # print(n.all_layers, n.all_params) if len(n.all_layers) != 4: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(n.all_params) != 4: - raise Exception("params dont match") + raise Exception("params do not match") diff --git a/tests/test_layers_flow_control.py b/tests/test_layers_flow_control.py index 8d367367c..41e5c2a81 100644 --- a/tests/test_layers_flow_control.py +++ b/tests/test_layers_flow_control.py @@ -27,10 +27,10 @@ network.print_params(False) if len(network.all_params) != 12: - raise Exception("params dont match") + raise Exception("params do not match") if len(network.all_layers) != 13: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(network.all_drop) != 5: - raise Exception("drop dont match") + raise Exception("drop do not match") diff --git a/tests/test_layers_importer.py b/tests/test_layers_importer.py index c869fe34e..44330a186 100644 --- a/tests/test_layers_importer.py +++ b/tests/test_layers_importer.py @@ -36,7 +36,7 @@ def keras_block(x): # logits, end_points = inception_v3(X, num_classes=1001, # is_training=False) network = tl.layers.SlimNetsLayer( - prev_layer=net_in, + net_in, slim_layer=inception_v3, slim_args={ 'num_classes': 1001, diff --git a/tests/test_layers_merge.py b/tests/test_layers_merge.py index 236ca0b33..c85688191 100644 --- a/tests/test_layers_merge.py +++ b/tests/test_layers_merge.py @@ -6,21 +6,21 @@ ## vector x = tf.placeholder(tf.float32, shape=[None, 784]) inputs = tl.layers.InputLayer(x, name='input_layer') -net1 = tl.layers.DenseLayer(inputs, 100, act=tf.nn.relu, name='relu1_1') -net2 = tl.layers.DenseLayer(inputs, 100, act=tf.nn.relu, name='relu2_1') -net = tl.layers.ConcatLayer([net1, net2], 1, name='concat_layer') +net1 = tl.layers.DenseLayer(inputs, n_units=100, act=tf.nn.relu, name='relu1_1') +net2 = tl.layers.DenseLayer(inputs, n_units=100, act=tf.nn.relu, name='relu2_1') +net = tl.layers.ConcatLayer([net1, net2], concat_dim=1, name='concat_layer') net.print_params(False) net.print_layers() if len(net.all_layers) != 3: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 4: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 157000: - raise Exception("params dont match") + raise 
Exception("params do not match") net_0 = tl.layers.DenseLayer(inputs, n_units=100, act=tf.nn.relu, name='net_0') net_1 = tl.layers.DenseLayer(inputs, n_units=100, act=tf.nn.relu, name='net_1') @@ -30,36 +30,36 @@ net.print_layers() if len(net.all_layers) != 3: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 4: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 157000: - raise Exception("params dont match") + raise Exception("params do not match") ## image x = tf.placeholder(tf.float32, shape=[None, 100, 100, 3]) inputs = tl.layers.InputLayer(x, name='input') -net1 = tl.layers.Conv2d(inputs, 32, (3, 3), (2, 2), act=tf.nn.relu, name='c1') -net2 = tl.layers.Conv2d(inputs, 32, (3, 3), (2, 2), act=tf.nn.relu, name='c2') -net = tl.layers.ConcatLayer([net1, net2], -1, name='concat') +net1 = tl.layers.Conv2d(inputs, n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, name='c1') +net2 = tl.layers.Conv2d(inputs, n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, name='c2') +net = tl.layers.ConcatLayer([net1, net2], concat_dim=-1, name='concat') net.print_params(False) net.print_layers() shape = net.outputs.get_shape().as_list() if shape[1:] != [50, 50, 64]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 3: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 4: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 1792: - raise Exception("params dont match") + raise Exception("params do not match") net = tl.layers.ElementwiseLayer([net1, net2], combine_fn=tf.minimum, name='minimum2') net.print_params(False) @@ -67,13 +67,13 @@ shape = net.outputs.get_shape().as_list() if shape[1:] != [50, 50, 32]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 3: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 4: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 1792: - raise Exception("params dont match") + raise Exception("params do not match") diff --git a/tests/test_layers_normalization.py b/tests/test_layers_normalization.py index 839acd7d2..2f52f8d93 100644 --- a/tests/test_layers_normalization.py +++ b/tests/test_layers_normalization.py @@ -22,10 +22,10 @@ def model(x, is_train=True, reuse=False): net.print_params(False) if len(net.all_layers) != 6: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 12: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 60560: - raise Exception("params dont match") + raise Exception("params do not match") diff --git a/tests/test_layers_padding.py b/tests/test_layers_padding.py index a0c859436..8d3d7f065 100644 --- a/tests/test_layers_padding.py +++ b/tests/test_layers_padding.py @@ -1,59 +1,59 @@ import tensorflow as tf -from tensorlayer.layers import ZeroPad1d, ZeroPad2d, ZeroPad3d, InputLayer +import tensorlayer as tl ## 1D x = tf.placeholder(tf.float32, (None, 100, 1)) -n = InputLayer(x) -n1 = ZeroPad1d(n, padding=1) +n = tl.layers.InputLayer(x) +n1 = tl.layers.ZeroPad1d(n, padding=1) n1.print_layers() shape = n1.outputs.get_shape().as_list() if shape[1:] != [102, 1]: - raise Exception("shape dont match") 
+ raise Exception("shape do not match") -n2 = ZeroPad1d(n, padding=(2, 3)) +n2 = tl.layers.ZeroPad1d(n, padding=(2, 3)) n2.print_layers() shape = n2.outputs.get_shape().as_list() if shape[1:] != [105, 1]: - raise Exception("shape dont match") + raise Exception("shape do not match") ## 2D x = tf.placeholder(tf.float32, (None, 100, 100, 3)) -n = InputLayer(x) -n1 = ZeroPad2d(n, padding=2) +n = tl.layers.InputLayer(x) +n1 = tl.layers.ZeroPad2d(n, padding=2) n1.print_layers() shape = n1.outputs.get_shape().as_list() if shape[1:] != [104, 104, 3]: - raise Exception("shape dont match") + raise Exception("shape do not match") -n2 = ZeroPad2d(n, padding=(2, 3)) +n2 = tl.layers.ZeroPad2d(n, padding=(2, 3)) n2.print_layers() shape = n2.outputs.get_shape().as_list() if shape[1:] != [104, 106, 3]: - raise Exception("shape dont match") + raise Exception("shape do not match") -n3 = ZeroPad2d(n, padding=((3, 3), (4, 4))) +n3 = tl.layers.ZeroPad2d(n, padding=((3, 3), (4, 4))) n3.print_layers() shape = n3.outputs.get_shape().as_list() if shape[1:] != [106, 108, 3]: - raise Exception("shape dont match") + raise Exception("shape do not match") ## 3D x = tf.placeholder(tf.float32, (None, 100, 100, 100, 3)) -n = InputLayer(x) -n1 = ZeroPad3d(n, padding=2) +n = tl.layers.InputLayer(x) +n1 = tl.layers.ZeroPad3d(n, padding=2) n1.print_layers() shape = n1.outputs.get_shape().as_list() if shape[1:] != [104, 104, 104, 3]: - raise Exception("shape dont match") + raise Exception("shape do not match") -n2 = ZeroPad3d(n, padding=(2, 3, 4)) +n2 = tl.layers.ZeroPad3d(n, padding=(2, 3, 4)) n2.print_layers() shape = n2.outputs.get_shape().as_list() if shape[1:] != [104, 106, 108, 3]: - raise Exception("shape dont match") + raise Exception("shape do not match") -n3 = ZeroPad3d(n, padding=((3, 3), (4, 4), (5, 5))) +n3 = tl.layers.ZeroPad3d(n, padding=((3, 3), (4, 4), (5, 5))) n3.print_layers() shape = n3.outputs.get_shape().as_list() if shape[1:] != [106, 108, 110, 3]: - raise Exception("shape dont match") + raise Exception("shape do not match") diff --git a/tests/test_layers_pooling.py b/tests/test_layers_pooling.py index 6086c568d..b94cee8f5 100644 --- a/tests/test_layers_pooling.py +++ b/tests/test_layers_pooling.py @@ -4,89 +4,89 @@ ## 1D ======================================================================== x = tf.placeholder(tf.float32, (None, 100, 1)) nin = tl.layers.InputLayer(x, name='in1') -nin = tl.layers.Conv1d(nin, 32, 5, 2, name='conv1d') +nin = tl.layers.Conv1d(nin, n_filter=32, filter_size=5, stride=2, name='conv1d') print(nin) shape = nin.outputs.get_shape().as_list() if (shape[1] != 50) or (shape[2] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") -n = tl.layers.MaxPool1d(nin, 3, 2, 'same', name='maxpool1d') +n = tl.layers.MaxPool1d(nin, filter_size=3, strides=2, padding='same', name='maxpool1d') print(n) shape = n.outputs.get_shape().as_list() # print(shape[1:3]) if shape[1:3] != [25, 32]: - raise Exception("shape dont match") + raise Exception("shape do not match") -n = tl.layers.MeanPool1d(nin, 3, 2, 'same', name='meanpool1d') +n = tl.layers.MeanPool1d(nin, filter_size=3, strides=2, padding='same', name='meanpool1d') print(n) shape = n.outputs.get_shape().as_list() if shape[1:3] != [25, 32]: - raise Exception("shape dont match") + raise Exception("shape do not match") n = tl.layers.GlobalMaxPool1d(nin, name='maxpool1d') print(n) shape = n.outputs.get_shape().as_list() if shape[-1] != 32: - raise Exception("shape dont match") + raise Exception("shape do not match") n 
= tl.layers.GlobalMeanPool1d(nin, name='meanpool1d') print(n) shape = n.outputs.get_shape().as_list() if shape[-1] != 32: - raise Exception("shape dont match") + raise Exception("shape do not match") ## 2D ======================================================================== x = tf.placeholder(tf.float32, (None, 100, 100, 3)) nin = tl.layers.InputLayer(x, name='in2') -nin = tl.layers.Conv2d(nin, 32, (3, 3), (2, 2), name='conv2d') +nin = tl.layers.Conv2d(nin, n_filter=32, filter_size=(3, 3), strides=(2, 2), name='conv2d') print(nin) shape = nin.outputs.get_shape().as_list() if (shape[1] != 50) or (shape[2] != 50) or (shape[3] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") -n = tl.layers.MaxPool2d(nin, (3, 3), (2, 2), 'SAME', name='maxpool2d') +n = tl.layers.MaxPool2d(nin, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='maxpool2d') print(n) shape = n.outputs.get_shape().as_list() # print(shape[1:3]) if shape[1:4] != [25, 25, 32]: - raise Exception("shape dont match") + raise Exception("shape do not match") -n = tl.layers.MeanPool2d(nin, (3, 3), (2, 2), 'SAME', name='meanpool2d') +n = tl.layers.MeanPool2d(nin, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='meanpool2d') print(n) shape = n.outputs.get_shape().as_list() if shape[1:4] != [25, 25, 32]: - raise Exception("shape dont match") + raise Exception("shape do not match") n = tl.layers.GlobalMaxPool2d(nin, name='maxpool2d') print(n) shape = n.outputs.get_shape().as_list() if shape[-1] != 32: - raise Exception("shape dont match") + raise Exception("shape do not match") n = tl.layers.GlobalMeanPool2d(nin, name='meanpool2d') print(n) shape = n.outputs.get_shape().as_list() if shape[-1] != 32: - raise Exception("shape dont match") + raise Exception("shape do not match") ## 3D ======================================================================== x = tf.placeholder(tf.float32, (None, 100, 100, 100, 3)) nin = tl.layers.InputLayer(x, name='in') -n = tl.layers.MeanPool3d(nin, (3, 3, 3), (2, 2, 2), 'SAME', name='meanpool3d') +n = tl.layers.MeanPool3d(nin, filter_size=(3, 3, 3), strides=(2, 2, 2), padding='SAME', name='meanpool3d') print(n) shape = n.outputs.get_shape().as_list() if shape != [None, 50, 50, 50, 3]: - raise Exception("shape dont match") + raise Exception("shape do not match") n = tl.layers.GlobalMaxPool3d(nin) print(n) shape = n.outputs.get_shape().as_list() if shape != [None, 3]: - raise Exception("shape dont match") + raise Exception("shape do not match") n = tl.layers.GlobalMeanPool3d(nin) print(n) shape = n.outputs.get_shape().as_list() if shape != [None, 3]: - raise Exception("shape dont match") + raise Exception("shape do not match") diff --git a/tests/test_layers_recurrent.py b/tests/test_layers_recurrent.py index 2a9726e9e..c416d19a6 100644 --- a/tests/test_layers_recurrent.py +++ b/tests/test_layers_recurrent.py @@ -23,40 +23,41 @@ net.print_params(False) if len(net.all_layers) != 7: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 7: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 7790: - raise Exception("params dont match") + raise Exception("params do not match") ## CNN+RNN encoder ==================================================== image_size = 100 batch_size = 10 num_steps = 5 + x = tf.placeholder(tf.float32, shape=[batch_size, image_size, image_size, 1]) net = tl.layers.InputLayer(x, name='in') -net = tl.layers.Conv2d(net, 32, (5, 5), (2, 2), 
tf.nn.relu, name='cnn1') -net = tl.layers.MaxPool2d(net, (2, 2), (2, 2), name='pool1') -net = tl.layers.Conv2d(net, 10, (5, 5), (2, 2), tf.nn.relu, name='cnn2') -net = tl.layers.MaxPool2d(net, (2, 2), (2, 2), name='pool2') +net = tl.layers.Conv2d(net, n_filter=32, filter_size=(5, 5), strides=(2, 2), act=tf.nn.relu, name='cnn1') +net = tl.layers.MaxPool2d(net, filter_size=(2, 2), strides=(2, 2), name='pool1') +net = tl.layers.Conv2d(net, n_filter=10, filter_size=(5, 5), strides=(2, 2), act=tf.nn.relu, name='cnn2') +net = tl.layers.MaxPool2d(net, filter_size=(2, 2), strides=(2, 2), name='pool2') net = tl.layers.FlattenLayer(net, name='flatten') -net = tl.layers.ReshapeLayer(net, shape=[-1, num_steps, int(net.outputs._shape[-1])]) +net = tl.layers.ReshapeLayer(net, shape=(-1, num_steps, int(net.outputs._shape[-1]))) rnn = tl.layers.RNNLayer(net, cell_fn=tf.contrib.rnn.BasicLSTMCell, n_hidden=200, n_steps=num_steps, return_last=False, return_seq_2d=True, name='rnn') -net = tl.layers.DenseLayer(rnn, 3, name='out') +net = tl.layers.DenseLayer(rnn, n_units=3, name='out') net.print_layers() net.print_params(False) if len(net.all_layers) != 8: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 8: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 562245: - raise Exception("params dont match") + raise Exception("params do not match") ## Bidirectional Synced input and output batch_size = 10 @@ -73,16 +74,16 @@ shape = net.outputs.get_shape().as_list() if shape[1:3] != [num_steps, hidden_size * 2]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 2: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 5: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 7160: - raise Exception("params dont match") + raise Exception("params do not match") # n_layer=2 net = tl.layers.EmbeddingInputlayer(inputs=input_data, vocabulary_size=vocab_size, embedding_size=hidden_size, name='emb2') @@ -94,16 +95,16 @@ shape = net.outputs.get_shape().as_list() if shape[1:3] != [num_steps, hidden_size * 2]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 2: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 9: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 13720: - raise Exception("params dont match") + raise Exception("params do not match") ## ConvLSTMLayer TODO # image_size = 100 @@ -147,20 +148,20 @@ shape = rnn.outputs.get_shape().as_list() if shape[-1] != embedding_size: - raise Exception("shape dont match") + raise Exception("shape do not match") shape = net.outputs.get_shape().as_list() if shape[-1] != vocab_size: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 3: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 5: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 4510: - raise Exception("params dont match") + raise Exception("params do not match") # n_layer=3 nin = tl.layers.EmbeddingInputlayer(inputs=input_seqs, vocabulary_size=vocab_size, embedding_size=embedding_size, name='seq_embedding2') @@ -178,7 +179,7 @@ shape = 
rnn.outputs.get_shape().as_list() if (shape[-1] != embedding_size) or (len(shape) != 2): - raise Exception("shape dont match") + raise Exception("shape do not match") net = tl.layers.DynamicRNNLayer( nin, @@ -197,7 +198,7 @@ shape = net.outputs.get_shape().as_list() if (shape[-1] != embedding_size) or (len(shape) != 3): - raise Exception("shape dont match") + raise Exception("shape do not match") net = tl.layers.DynamicRNNLayer( nin, @@ -213,7 +214,7 @@ net.print_params(False) shape = net.outputs.get_shape().as_list() if (shape[-1] != embedding_size) or (len(shape) != 2): - raise Exception("shape dont match") + raise Exception("shape do not match") net = tl.layers.DynamicRNNLayer( nin, @@ -229,7 +230,7 @@ net.print_params(False) shape = net.outputs.get_shape().as_list() if (shape[-1] != embedding_size) or (len(shape) != 2): - raise Exception("shape dont match") + raise Exception("shape do not match") ## BiDynamic Synced input and output rnn = tl.layers.BiDynamicRNNLayer( @@ -248,20 +249,20 @@ shape = rnn.outputs.get_shape().as_list() if shape[-1] != embedding_size * 2: - raise Exception("shape dont match") + raise Exception("shape do not match") shape = net.outputs.get_shape().as_list() if shape[-1] != vocab_size: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 3: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 7: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 8390: - raise Exception("params dont match") + raise Exception("params do not match") # n_layer=2 rnn = tl.layers.BiDynamicRNNLayer( @@ -281,20 +282,20 @@ shape = rnn.outputs.get_shape().as_list() if shape[-1] != embedding_size * 2: - raise Exception("shape dont match") + raise Exception("shape do not match") shape = net.outputs.get_shape().as_list() if shape[-1] != vocab_size: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 3: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 11: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 18150: - raise Exception("params dont match") + raise Exception("params do not match") ## Seq2Seq from tensorlayer.layers import EmbeddingInputlayer, Seq2Seq, retrieve_seq_length_op2, DenseLayer @@ -333,13 +334,13 @@ shape = net.outputs.get_shape().as_list() if shape[-1] != 10000: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 5: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 11: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 5293200: - raise Exception("params dont match") + raise Exception("params do not match") diff --git a/tests/test_layers_shape.py b/tests/test_layers_shape.py index 585fbf012..09efe5b80 100644 --- a/tests/test_layers_shape.py +++ b/tests/test_layers_shape.py @@ -12,35 +12,35 @@ shape = net.outputs.get_shape().as_list() if shape[-1] != 784: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 1: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 0: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 0: - raise Exception("params 
dont match") + raise Exception("params do not match") ## Reshape -net = tl.layers.ReshapeLayer(net, [-1, 28, 28, 1], name='reshape') +net = tl.layers.ReshapeLayer(net, shape=(-1, 28, 28, 1), name='reshape') net.print_layers() net.print_params(False) shape = net.outputs.get_shape().as_list() if shape[1:] != [28, 28, 1]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 2: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 0: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 0: - raise Exception("params dont match") + raise Exception("params do not match") ## TransposeLayer net = tl.layers.TransposeLayer(net, perm=[0, 1, 3, 2], name='trans') @@ -50,13 +50,13 @@ shape = net.outputs.get_shape().as_list() if shape[1:] != [28, 1, 28]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 3: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 0: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 0: - raise Exception("params dont match") + raise Exception("params do not match") diff --git a/tests/test_layers_spatial_transformer.py b/tests/test_layers_spatial_transformer.py index af9021649..ab7fe73de 100644 --- a/tests/test_layers_spatial_transformer.py +++ b/tests/test_layers_spatial_transformer.py @@ -1,29 +1,28 @@ import tensorflow as tf -from tensorlayer.layers import InputLayer, FlattenLayer, DenseLayer, DropoutLayer, SpatialTransformer2dAffineLayer, Conv2d +import tensorlayer as tl x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1]) - def model(x, is_train, reuse): with tf.variable_scope("STN", reuse=reuse): - nin = InputLayer(x, name='in') + nin = tl.layers.InputLayer(x, name='in') ## 1. Localisation network # use MLP as the localisation net - nt = FlattenLayer(nin, name='flatten') - nt = DenseLayer(nt, n_units=20, act=tf.nn.tanh, name='dense1') - nt = DropoutLayer(nt, 0.8, True, is_train, name='drop1') + nt = tl.layers.FlattenLayer(nin, name='flatten') + nt = tl.layers.DenseLayer(nt, n_units=20, act=tf.nn.tanh, name='dense1') + nt = tl.layers.DropoutLayer(nt, keep=0.8, is_fix=True, is_train=is_train, name='drop1') # you can also use CNN instead for MLP as the localisation net # nt = Conv2d(nin, 16, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', name='tc1') # nt = Conv2d(nt, 8, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', name='tc2') ## 2. Spatial transformer module (sampler) - n = SpatialTransformer2dAffineLayer(nin, nt, out_size=[40, 40], name='spatial') + n = tl.layers.SpatialTransformer2dAffineLayer(nin, theta_layer=nt, out_size=[40, 40], name='spatial') s = n ## 3. 
Classifier - n = Conv2d(n, 16, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', name='conv1') - n = Conv2d(n, 16, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', name='conv2') - n = FlattenLayer(n, name='flatten2') - n = DenseLayer(n, n_units=1024, act=tf.nn.relu, name='out1') - n = DenseLayer(n, n_units=10, act=tf.identity, name='out2') + n = tl.layers.Conv2d(n, n_filter=16, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, padding='SAME', name='conv1') + n = tl.layers.Conv2d(n, n_filter=16, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, padding='SAME', name='conv2') + n = tl.layers.FlattenLayer(n, name='flatten2') + n = tl.layers.DenseLayer(n, n_units=1024, act=tf.nn.relu, name='out1') + n = tl.layers.DenseLayer(n, n_units=10, act=tf.identity, name='out2') return n, s @@ -35,13 +34,13 @@ def model(x, is_train, reuse): shape = s.outputs.get_shape().as_list() if shape[1:] != [40, 40, 1]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 9: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 12: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 1667980: - raise Exception("params dont match") + raise Exception("params do not match") diff --git a/tests/test_layers_special_activation.py b/tests/test_layers_special_activation.py index 137ee45ea..970b55176 100644 --- a/tests/test_layers_special_activation.py +++ b/tests/test_layers_special_activation.py @@ -11,16 +11,16 @@ shape = net.outputs.get_shape().as_list() if shape[-1] != 10: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 2: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 3: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 320: - raise Exception("params dont match") + raise Exception("params do not match") net = tl.layers.PReluLayer(net, channel_shared=True, name='prelu2') @@ -29,13 +29,13 @@ shape = net.outputs.get_shape().as_list() if shape[-1] != 10: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 3: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 4: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 321: - raise Exception("params dont match") + raise Exception("params do not match") diff --git a/tests/test_layers_stack.py b/tests/test_layers_stack.py index db60318fe..7be9a80df 100644 --- a/tests/test_layers_stack.py +++ b/tests/test_layers_stack.py @@ -13,32 +13,32 @@ shape = net.outputs.get_shape().as_list() if shape[-1] != 10: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 4: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 6: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 930: - raise Exception("params dont match") + raise Exception("params do not match") net = tl.layers.UnStackLayer(net, axis=1, name='unstack') for n in net: print(n, n.outputs) shape = n.outputs.get_shape().as_list() if shape[-1] != 10: - raise Exception("shape dont match") + raise Exception("shape do not match") # n.print_layers() # n.print_params(False) if len(n.all_layers) != 4: - raise 
Exception("layers dont match") + raise Exception("layers do not match") if len(n.all_params) != 6: - raise Exception("params dont match") + raise Exception("params do not match") if n.count_params() != 930: - raise Exception("params dont match") + raise Exception("params do not match") diff --git a/tests/test_layers_super_resolution.py b/tests/test_layers_super_resolution.py index 9b6a1d48e..eb1377451 100644 --- a/tests/test_layers_super_resolution.py +++ b/tests/test_layers_super_resolution.py @@ -13,16 +13,16 @@ shape = n.outputs.get_shape().as_list() if shape != [10, 200, 16]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(n.all_layers) != 2: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(n.all_params) != 2: - raise Exception("params dont match") + raise Exception("params do not match") if n.count_params() != 416: - raise Exception("params dont match") + raise Exception("params do not match") ## 2D x = tf.placeholder('float32', [10, 100, 100, 3], name='x') @@ -36,13 +36,13 @@ shape = n.outputs.get_shape().as_list() if shape != [10, 200, 200, 8]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(n.all_layers) != 2: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(n.all_params) != 2: - raise Exception("params dont match") + raise Exception("params do not match") if n.count_params() != 608: - raise Exception("params dont match") + raise Exception("params do not match") diff --git a/tests/test_layers_time_distributed.py b/tests/test_layers_time_distributed.py index ed805958d..7425c2f8e 100644 --- a/tests/test_layers_time_distributed.py +++ b/tests/test_layers_time_distributed.py @@ -12,11 +12,11 @@ net = TimeDistributedLayer(net, layer_class=DenseLayer, args={'n_units': 50, 'name': 'dense'}, name='time_dense') if net.outputs.get_shape().as_list() != [32, 20, 50]: - raise Exception("shape dont match") + raise Exception("shape do not match") # ... (32, 20, 50) net.print_params(False) if net.count_params() != 5050: - raise Exception("params dont match") + raise Exception("params do not match") ## reuse diff --git a/tests/test_mnist_simple.py b/tests/test_mnist_simple.py index 84acd4b88..980053ef6 100644 --- a/tests/test_mnist_simple.py +++ b/tests/test_mnist_simple.py @@ -17,9 +17,9 @@ # define the network network = tl.layers.InputLayer(x, name='input') network = tl.layers.DropoutLayer(network, keep=0.8, name='drop1') -network = tl.layers.DenseLayer(network, 100, tf.nn.relu, name='relu1') +network = tl.layers.DenseLayer(network, n_units=100, act=tf.nn.relu, name='relu1') network = tl.layers.DropoutLayer(network, keep=0.8, name='drop2') -network = tl.layers.DenseLayer(network, 100, tf.nn.relu, name='relu2') +network = tl.layers.DenseLayer(network, n_units=100, act=tf.nn.relu, name='relu2') network = tl.layers.DropoutLayer(network, keep=0.8, name='drop3') # the softmax is implemented internally in tl.cost.cross_entropy(y, y_) to # speed up computation, so we use identity here. 
diff --git a/tests/test_models.py b/tests/test_models.py index e8bbe9663..856dbf8b7 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -14,10 +14,10 @@ # use for inferencing probs = tf.nn.softmax(vgg.outputs) if len(vgg.all_layers) != 22: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(vgg.all_params) != 32: - raise Exception("params dont match") + raise Exception("params do not match") with tf.Graph().as_default() as graph: # - Extract features with VGG16 and Train a classifier with 100 classes @@ -26,10 +26,10 @@ vgg = tl.models.VGG16(x, end_with='fc2_relu') if len(vgg.all_layers) != 21: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(vgg.all_params) != 30: - raise Exception("params dont match") + raise Exception("params do not match") # add one more layer net = tl.layers.DenseLayer(vgg, 100, name='out') @@ -41,7 +41,7 @@ # train your own classifier (only update the last layer) train_params = tl.layers.get_variables_with_name('out') if len(train_params) != 2: - raise Exception("params dont match") + raise Exception("params do not match") with tf.Graph().as_default() as graph: # - Reuse model @@ -56,7 +56,7 @@ # vgg1.restore_params(sess) if len(vgg1.all_layers) != 21: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(vgg1.all_params) != 30: - raise Exception("params dont match") + raise Exception("params do not match") From 831f7f4c6b8b8440373bcfafdea58208e6c5bf73 Mon Sep 17 00:00:00 2001 From: DEKHTIARJonathan Date: Tue, 10 Apr 2018 16:47:16 +0200 Subject: [PATCH 10/21] ReshapeLayer error fix --- tensorlayer/layers/shape.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorlayer/layers/shape.py b/tensorlayer/layers/shape.py index b76ae5919..31f47ab39 100644 --- a/tensorlayer/layers/shape.py +++ b/tensorlayer/layers/shape.py @@ -89,14 +89,14 @@ def __init__( self.inputs = prev_layer.outputs - logging.info("ReshapeLayer %s: %s" % (self.name, self.outputs.get_shape())) - if shape: raise ValueError("Shape list can not be empty") self.outputs = tf.reshape(self.inputs, shape=shape, name=name) self.all_layers.append(self.outputs) + logging.info("ReshapeLayer %s: %s" % (self.name, self.outputs.get_shape())) + class TransposeLayer(Layer): """A layer that transposes the dimension of a tensor. 
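A note on the PATCH 10 hunk above: it only moves the logging.info call below the tf.reshape assignment, but that reordering is the entire fix, because the old code read self.outputs.get_shape() before self.outputs had been assigned. A standalone toy showing the same failure mode (illustrative classes, not TensorLayer code):

    class Broken(object):
        def __init__(self):
            print(self.outputs)  # AttributeError: attribute read before assignment
            self.outputs = 1

    class Fixed(object):
        def __init__(self):
            self.outputs = 1     # assign first, as the patched ReshapeLayer does
            print(self.outputs)  # safe: prints 1

    Fixed()
    # Broken()  # uncomment to reproduce the AttributeError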
From 4d8124e3e18da2d2b1087991bb2139bbc7140d7f Mon Sep 17 00:00:00 2001
From: DEKHTIARJonathan
Date: Tue, 10 Apr 2018 16:48:33 +0200
Subject: [PATCH 11/21] test_layers_normalization: missing argument name fixed

---
 tests/test_layers_normalization.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/test_layers_normalization.py b/tests/test_layers_normalization.py
index 2f52f8d93..38480a1f5 100644
--- a/tests/test_layers_normalization.py
+++ b/tests/test_layers_normalization.py
@@ -5,9 +5,9 @@ def model(x, is_train=True, reuse=False):
     with tf.variable_scope("model", reuse=reuse):
         n = tl.layers.InputLayer(x, name='in')
-        n = tl.layers.Conv2d(n, 80, name='conv2d_1')
+        n = tl.layers.Conv2d(n, n_filter=80, name='conv2d_1')
         n = tl.layers.BatchNormLayer(n, name='norm_batch')
-        n = tl.layers.Conv2d(n, 80, name='conv2d_2')
+        n = tl.layers.Conv2d(n, n_filter=80, name='conv2d_2')
         n = tl.layers.LocalResponseNormLayer(n, name='norm_local')
         n = tl.layers.LayerNormLayer(n, name='norm_layer')
         n = tl.layers.InstanceNormLayer(n, name='norm_instance')

From 3304c447ef5bc10cee96e417261fedfd162a9014 Mon Sep 17 00:00:00 2001
From: DEKHTIARJonathan
Date: Tue, 10 Apr 2018 16:53:42 +0200
Subject: [PATCH 12/21] tl.layers.ReshapeLayer: inverted empty-shape test fixed

---
 tensorlayer/layers/shape.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tensorlayer/layers/shape.py b/tensorlayer/layers/shape.py
index 31f47ab39..5ae761ce0 100644
--- a/tensorlayer/layers/shape.py
+++ b/tensorlayer/layers/shape.py
@@ -89,7 +89,7 @@ def __init__(
 
         self.inputs = prev_layer.outputs
 
-        if shape:
+        if not shape:
             raise ValueError("Shape list can not be empty")
 
         self.outputs = tf.reshape(self.inputs, shape=shape, name=name)

From a65ff54f66a678f16a641f71086af4799b9b4b3c Mon Sep 17 00:00:00 2001
From: DEKHTIARJonathan
Date: Tue, 10 Apr 2018 16:54:47 +0200
Subject: [PATCH 13/21] test_layers_special_activation: missing argument name
 fixed

---
 tests/test_layers_special_activation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/test_layers_special_activation.py b/tests/test_layers_special_activation.py
index 970b55176..c50591eea 100644
--- a/tests/test_layers_special_activation.py
+++ b/tests/test_layers_special_activation.py
@@ -3,7 +3,7 @@
 
 x = tf.placeholder(tf.float32, shape=[None, 30])
 net = tl.layers.InputLayer(x, name='input')
-net = tl.layers.DenseLayer(net, 10, name='dense')
+net = tl.layers.DenseLayer(net, n_units=10, name='dense')
 net = tl.layers.PReluLayer(net, name='prelu')
 
 net.print_layers()

From 6bedf2ded2baacb67f38c782deb834c51c19a4b1 Mon Sep 17 00:00:00 2001
From: DEKHTIARJonathan
Date: Tue, 10 Apr 2018 16:58:50 +0200
Subject: [PATCH 14/21] test_layers_stack: missing argument names fixed

---
 tests/test_layers_stack.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/test_layers_stack.py b/tests/test_layers_stack.py
index 7be9a80df..b473c00d2 100644
--- a/tests/test_layers_stack.py
+++ b/tests/test_layers_stack.py
@@ -3,9 +3,9 @@
 
 x = tf.placeholder(tf.float32, shape=[None, 30])
 net = tl.layers.InputLayer(x, name='input')
-net1 = tl.layers.DenseLayer(net, 10, name='dense1')
-net2 = tl.layers.DenseLayer(net, 10, name='dense2')
-net3 = tl.layers.DenseLayer(net, 10, name='dense3')
+net1 = tl.layers.DenseLayer(net, n_units=10, name='dense1')
+net2 = tl.layers.DenseLayer(net, n_units=10, name='dense2')
+net3 = tl.layers.DenseLayer(net, n_units=10, name='dense3')
 net = tl.layers.StackLayer([net1, net2, net3], axis=1, name='stack')
 net.print_layers()
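PATCH 12 above inverts the existing guard, which read `if shape:` and therefore raised for every valid, non-empty shape. The corrected logic, as a standalone sketch (plain function with an illustrative name, not the TensorLayer class):

    def check_reshape_target(shape):
        if not shape:  # reject only an empty shape list/tuple
            raise ValueError("Shape list can not be empty")
        return shape

    print(check_reshape_target([-1, 28, 28, 1]))  # [-1, 28, 28, 1]
    # check_reshape_target([])                    # raises ValueError, as intended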
From 4ae2a0b6d427365ff5ffd0f59f41d5ab76a60f43 Mon Sep 17 00:00:00 2001
From: DEKHTIARJonathan
Date: Tue, 10 Apr 2018 16:59:04 +0200
Subject: [PATCH 15/21] test_layers_super_resolution: missing argument names
 fixed

---
 tests/test_layers_super_resolution.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/tests/test_layers_super_resolution.py b/tests/test_layers_super_resolution.py
index eb1377451..4850d66ff 100644
--- a/tests/test_layers_super_resolution.py
+++ b/tests/test_layers_super_resolution.py
@@ -1,11 +1,11 @@
 import tensorflow as tf
-from tensorlayer.layers import SubpixelConv1d, SubpixelConv2d, InputLayer, Conv1d, Conv2d
+import tensorlayer as tl
 
 ## 1D
 t_signal = tf.placeholder('float32', [10, 100, 4], name='x')
-n = InputLayer(t_signal, name='in')
-n = Conv1d(n, 32, 3, 1, padding='SAME', name='conv1d')
-n = SubpixelConv1d(n, scale=2, name='subpixel')
+n = tl.layers.InputLayer(t_signal, name='in')
+n = tl.layers.Conv1d(n, n_filter=32, filter_size=3, stride=1, padding='SAME', name='conv1d')
+n = tl.layers.SubpixelConv1d(n, scale=2, name='subpixel')
 print(n.outputs.shape)
 # ... (10, 200, 2)
 n.print_layers()
@@ -26,9 +26,9 @@
 
 ## 2D
 x = tf.placeholder('float32', [10, 100, 100, 3], name='x')
-n = InputLayer(x, name='in')
-n = Conv2d(n, 32, (3, 2), (1, 1), padding='SAME', name='conv2d')
-n = SubpixelConv2d(n, scale=2, name='subpixel2d')
+n = tl.layers.InputLayer(x, name='in')
+n = tl.layers.Conv2d(n, n_filter=32, filter_size=(3, 2), strides=(1, 1), padding='SAME', name='conv2d')
+n = tl.layers.SubpixelConv2d(n, scale=2, name='subpixel2d')
 print(n.outputs.shape)
 n.print_layers()

From dc886924cc3887b76835d5f651517c508a9175d3 Mon Sep 17 00:00:00 2001
From: DEKHTIARJonathan
Date: Tue, 10 Apr 2018 16:59:57 +0200
Subject: [PATCH 16/21] test_models: missing argument name fixed

---
 tests/test_models.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/test_models.py b/tests/test_models.py
index 856dbf8b7..7800a918d 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -32,7 +32,7 @@
         raise Exception("params do not match")
 
     # add one more layer
-    net = tl.layers.DenseLayer(vgg, 100, name='out')
+    net = tl.layers.DenseLayer(vgg, n_units=100, name='out')
     # initialize all parameters
     # sess = tf.InteractiveSession()
     # tl.layers.initialize_global_variables(sess)

From 6c02e5bac38f28684e28106465d61a98b123ed03 Mon Sep 17 00:00:00 2001
From: DEKHTIARJonathan
Date: Tue, 10 Apr 2018 17:04:12 +0200
Subject: [PATCH 17/21] Formatting error fixed

---
 tests/test_layers_spatial_transformer.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/test_layers_spatial_transformer.py b/tests/test_layers_spatial_transformer.py
index ab7fe73de..6935528a5 100644
--- a/tests/test_layers_spatial_transformer.py
+++ b/tests/test_layers_spatial_transformer.py
@@ -3,6 +3,7 @@
 
 x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
 
+
 def model(x, is_train, reuse):
     with tf.variable_scope("STN", reuse=reuse):
         nin = tl.layers.InputLayer(x, name='in')

From 9702860ce5f337a16f40256471ccfccc84445a60 Mon Sep 17 00:00:00 2001
From: DEKHTIARJonathan
Date: Thu, 12 Apr 2018 22:28:31 +0200
Subject: [PATCH 18/21] Decorator for deprecated argument added

---
 tensorlayer/deprecation.py | 42 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)
 create mode 100644 tensorlayer/deprecation.py

diff --git a/tensorlayer/deprecation.py b/tensorlayer/deprecation.py
new file mode 100644
index 000000000..61a153db6
--- /dev/null
+++ b/tensorlayer/deprecation.py
@@ -0,0 +1,42 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+import functools
+import warnings
+
+from . import _logging as logging
+
+
+def deprecated_alias(end_support_version, **aliases):
+    def deco(f):
+        @functools.wraps(f)
+        def wrapper(*args, **kwargs):
+
+            try:
+                func_name = "{}.{}".format(args[0].__class__.__name__, f.__name__)
+            except (NameError, IndexError):
+                func_name = f.__name__
+
+            rename_kwargs(kwargs, aliases, end_support_version, func_name)
+
+            return f(*args, **kwargs)
+
+        return wrapper
+
+    return deco
+
+
+def rename_kwargs(kwargs, aliases, end_support_version, func_name):
+
+    for alias, new in aliases.items():
+
+        if alias in kwargs:
+
+            if new in kwargs:
+                raise TypeError('{}() received both {} and {}'.format(func_name, alias, new))
+
+            warnings.warn('{}() - {} is deprecated; use {}'.format(func_name, alias, new), DeprecationWarning)
+            logging.warning("DeprecationWarning: {}(): "
+                            "`{}` argument is deprecated and will be removed in version {}, "
+                            "please change for `{}.`".format(func_name, alias, end_support_version, new))
+            kwargs[new] = kwargs.pop(alias)

From 8ea85dfc1cde5e99007f3cc2d477c684c209489c Mon Sep 17 00:00:00 2001
From: DEKHTIARJonathan
Date: Fri, 13 Apr 2018 00:15:35 +0200
Subject: [PATCH 19/21] Deprecation API Change - Use of newly implemented
 decorator. Docstring updated

---
 tensorlayer/layers/binary.py              | 109 +++----
 tensorlayer/layers/convolution.py         | 359 +++++++++-------------
 tensorlayer/layers/core.py                | 114 +++----
 tensorlayer/layers/extend.py              |  33 +-
 tensorlayer/layers/importer.py            |  59 ++--
 tensorlayer/layers/merge.py               |   1 -
 tensorlayer/layers/normalization.py       |  76 ++---
 tensorlayer/layers/object_detection.py    |  16 +-
 tensorlayer/layers/padding.py             |  57 ++--
 tensorlayer/layers/pooling.py             | 227 +++++---------
 tensorlayer/layers/recurrent.py           |  60 ++--
 tensorlayer/layers/shape.py               |  53 +---
 tensorlayer/layers/spatial_transformer.py |  18 +-
 tensorlayer/layers/special_activation.py  |  15 +-
 tensorlayer/layers/stack.py               |  18 +-
 tensorlayer/layers/super_resolution.py    |  30 +-
 tensorlayer/layers/time_distribution.py   |  10 +-
 17 files changed, 512 insertions(+), 743 deletions(-)

diff --git a/tensorlayer/layers/binary.py b/tensorlayer/layers/binary.py
index e0d5f4695..4987fe767 100644
--- a/tensorlayer/layers/binary.py
+++ b/tensorlayer/layers/binary.py
@@ -4,6 +4,8 @@
 from ..
import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'BinaryDenseLayer', 'BinaryConv2d', @@ -124,10 +126,10 @@ class BinaryDenseLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, n_units=100, act=tf.identity, use_gemm=False, @@ -137,10 +139,8 @@ def __init__( b_init_args=None, name='binary_dense', ): - # super(BinaryDenseLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(BinaryDenseLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(BinaryDenseLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("BinaryDenseLayer %s: %d %s" % (name, n_units, act.__name__)) self.inputs = prev_layer.outputs @@ -157,7 +157,7 @@ def __init__( n_in = int(self.inputs.get_shape()[-1]) self.n_units = n_units - logging.info("BinaryDenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__)) + with tf.variable_scope(name): W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) # W = tl.act.sign(W) # dont update ... @@ -234,10 +234,10 @@ class BinaryConv2d(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, n_filter=32, filter_size=(3, 3), strides=(1, 1), @@ -262,10 +262,9 @@ def __init__( # data_format=None, name='binary_cnn2d', ): - # super(BinaryConv2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(BinaryConv2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(BinaryConv2d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("BinaryConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (name, n_filter, str(filter_size), str(strides), padding, + act.__name__)) self.inputs = prev_layer.outputs @@ -278,9 +277,6 @@ def __init__( if use_gemm: raise Exception("TODO. 
The current version use tf.matmul for inferencing.") - logging.info("BinaryConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (self.name, n_filter, str(filter_size), str(strides), padding, - act.__name__)) - if len(strides) != 2: raise ValueError("len(strides) should be 2.") try: @@ -335,10 +331,10 @@ class TernaryDenseLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, n_units=100, act=tf.identity, use_gemm=False, @@ -348,10 +344,8 @@ def __init__( b_init_args=None, name='ternary_dense', ): - # super(TernaryDenseLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(TernaryDenseLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(TernaryDenseLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("TernaryDenseLayer %s: %d %s" % (name, n_units, act.__name__)) self.inputs = prev_layer.outputs @@ -367,7 +361,7 @@ def __init__( n_in = int(self.inputs.get_shape()[-1]) self.n_units = n_units - logging.info("TernaryDenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__)) + with tf.variable_scope(name): W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) # W = tl.act.sign(W) # dont update ... @@ -446,10 +440,10 @@ class TernaryConv2d(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, n_filter=32, filter_size=(3, 3), strides=(1, 1), @@ -474,10 +468,9 @@ def __init__( # data_format=None, name='ternary_cnn2d', ): - # super(TernaryConv2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(TernaryConv2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(TernaryConv2d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("TernaryConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (name, n_filter, str(filter_size), str(strides), padding, + act.__name__)) if W_init_args is None: W_init_args = {} @@ -488,9 +481,6 @@ def __init__( if use_gemm: raise Exception("TODO. The current version use tf.matmul for inferencing.") - logging.info("TernaryConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (self.name, n_filter, str(filter_size), str(strides), padding, - act.__name__)) - if len(strides) != 2: raise ValueError("len(strides) should be 2.") try: @@ -527,7 +517,7 @@ class DorefaDenseLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. 
bitW : int The bits of this layer's parameter @@ -552,10 +542,10 @@ class DorefaDenseLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, bitW=1, bitA=3, n_units=100, @@ -567,10 +557,8 @@ def __init__( b_init_args=None, name='dorefa_dense', ): - # super(DorefaDenseLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(DorefaDenseLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(DorefaDenseLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("DorefaDenseLayer %s: %d %s" % (name, n_units, act.__name__)) self.inputs = prev_layer.outputs @@ -586,7 +574,7 @@ def __init__( n_in = int(self.inputs.get_shape()[-1]) self.n_units = n_units - logging.info("DorefaDenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__)) + with tf.variable_scope(name): W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) # W = tl.act.sign(W) # dont update ... @@ -620,7 +608,7 @@ class DorefaConv2d(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. bitW : int The bits of this layer's parameter @@ -668,10 +656,10 @@ class DorefaConv2d(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, bitW=1, bitA=3, n_filter=32, @@ -698,10 +686,9 @@ def __init__( # data_format=None, name='dorefa_cnn2d', ): - # super(DorefaConv2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(DorefaConv2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(DorefaConv2d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("DorefaConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (name, n_filter, str(filter_size), str(strides), padding, + act.__name__)) self.inputs = prev_layer.outputs @@ -715,9 +702,6 @@ def __init__( if use_gemm: raise Exception("TODO. The current version use tf.matmul for inferencing.") - logging.info("DorefaConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (self.name, n_filter, str(filter_size), str(strides), padding, - act.__name__)) - if len(strides) != 2: raise ValueError("len(strides) should be 2.") try: @@ -750,23 +734,20 @@ class SignLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. name : a str A unique layer name. 
""" + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, name='sign', ): - # super(SignLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(SignLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(SignLayer, self).__init__(prev_layer=prev_layer, name=name) self.inputs = prev_layer.outputs @@ -784,7 +765,7 @@ class ScaleLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. init_scale : float The initial value for the scale factor. @@ -793,22 +774,18 @@ class ScaleLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, init_scale=0.05, name='scale', ): - # super(ScaleLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(ScaleLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(ScaleLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("ScaleLayer %s: init_scale: %f" % (name, init_scale)) self.inputs = prev_layer.outputs - logging.info("ScaleLayer %s: init_scale: %f" % (self.name, init_scale)) - with tf.variable_scope(name): # scale = tf.get_variable(name='scale_factor', init, trainable=True, ) scale = tf.get_variable("scale", shape=[1], initializer=tf.constant_initializer(value=init_scale)) diff --git a/tensorlayer/layers/convolution.py b/tensorlayer/layers/convolution.py index 7dc871854..3957feefb 100644 --- a/tensorlayer/layers/convolution.py +++ b/tensorlayer/layers/convolution.py @@ -1,11 +1,12 @@ # -*- coding: utf-8 -*- -import warnings import tensorflow as tf from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'Conv1dLayer', 'Conv2dLayer', @@ -34,7 +35,7 @@ class Conv1dLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. act : activation function The activation function of this layer. 
@@ -61,10 +62,10 @@ class Conv1dLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, act=tf.identity, shape=(5, 1, 5), stride=1, @@ -77,10 +78,8 @@ def __init__( b_init_args=None, name='cnn1d', ): - # super(Conv1dLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(Conv1dLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(Conv1dLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("Conv1dLayer %s: shape:%s stride:%s pad:%s act:%s" % (name, str(shape), str(stride), padding, act.__name__)) self.inputs = prev_layer.outputs @@ -91,8 +90,6 @@ def __init__( if b_init_args is None: b_init_args = {} - logging.info("Conv1dLayer %s: shape:%s stride:%s pad:%s act:%s" % (self.name, str(shape), str(stride), padding, act.__name__)) - with tf.variable_scope(name): W = tf.get_variable(name='W_conv1d', shape=shape, initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) self.outputs = tf.nn.convolution( @@ -119,7 +116,7 @@ class Conv2dLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. act : activation function The activation function of this layer. @@ -181,10 +178,10 @@ class Conv2dLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, act=tf.identity, shape=(5, 5, 1, 100), strides=(1, 1, 1, 1), @@ -197,10 +194,8 @@ def __init__( data_format=None, name='cnn_layer', ): - # super(Conv2dLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(Conv2dLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(Conv2dLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("Conv2dLayer %s: shape:%s strides:%s pad:%s act:%s" % (name, str(shape), str(strides), padding, act.__name__)) self.inputs = prev_layer.outputs @@ -211,8 +206,6 @@ def __init__( if act is None: act = tf.identity - logging.info("Conv2dLayer %s: shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(strides), padding, act.__name__)) - with tf.variable_scope(name): W = tf.get_variable(name='W_conv2d', shape=shape, initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) if b_init: @@ -236,7 +229,7 @@ class DeConv2dLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. act : activation function The activation function of this layer. 
@@ -306,10 +299,10 @@ class DeConv2dLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, act=tf.identity, shape=(3, 3, 128, 256), output_shape=(1, 256, 256, 128), @@ -321,10 +314,9 @@ def __init__( b_init_args=None, name='decnn2d_layer', ): - # super(DeConv2dLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(DeConv2dLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(DeConv2dLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("DeConv2dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % (name, str(shape), str(output_shape), str(strides), padding, + act.__name__)) self.inputs = prev_layer.outputs @@ -335,8 +327,6 @@ def __init__( if act is None: act = tf.identity - logging.info("DeConv2dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(output_shape), str(strides), padding, - act.__name__)) # logging.info(" DeConv2dLayer: Untested") with tf.variable_scope(name): W = tf.get_variable(name='W_deconv2d', shape=shape, initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) @@ -362,7 +352,7 @@ class Conv3dLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. act : activation function The activation function of this layer. @@ -392,10 +382,10 @@ class Conv3dLayer(Layer): ... [None, 50, 50, 50, 32] """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, act=tf.identity, shape=(2, 2, 2, 3, 32), strides=(1, 2, 2, 2, 1), @@ -406,10 +396,8 @@ def __init__( b_init_args=None, name='cnn3d_layer', ): - # super(Conv3dLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(Conv3dLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(Conv3dLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("Conv3dLayer %s: shape:%s strides:%s pad:%s act:%s" % (name, str(shape), str(strides), padding, act.__name__)) self.inputs = prev_layer.outputs @@ -420,8 +408,6 @@ def __init__( if act is None: act = tf.identity - logging.info("Conv3dLayer %s: shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(strides), padding, act.__name__)) - with tf.variable_scope(name): # W = tf.Variable(W_init(shape=shape, **W_init_args), name='W_conv') # b = tf.Variable(b_init(shape=[shape[-1]], **b_init_args), name='b_conv') @@ -449,7 +435,7 @@ class DeConv3dLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. act : activation function The activation function of this layer. 
@@ -475,10 +461,10 @@ class DeConv3dLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, act=tf.identity, shape=(2, 2, 2, 128, 256), output_shape=(1, 12, 32, 32, 128), @@ -490,10 +476,9 @@ def __init__( b_init_args=None, name='decnn3d_layer', ): - # super(DeConv3dLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(DeConv3dLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(DeConv3dLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("DeConv3dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % (name, str(shape), str(output_shape), str(strides), padding, + act.__name__)) self.inputs = prev_layer.outputs @@ -504,9 +489,6 @@ def __init__( if act is None: act = tf.identity - logging.info("DeConv3dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(output_shape), str(strides), padding, - act.__name__)) - with tf.variable_scope(name): W = tf.get_variable(name='W_deconv3d', shape=shape, initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) if b_init: @@ -530,7 +512,7 @@ class UpSampling2dLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer with 4-D Tensor of the shape (batch, height, width, channels) or 3-D Tensor of the shape (height, width, channels). size : tuple of int/float (height, width) scale factor or new size of height and width. @@ -549,20 +531,18 @@ class UpSampling2dLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, size=list(), is_scale=True, method=0, align_corners=False, name='upsample2d_layer', ): - # super(UpSampling2dLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(UpSampling2dLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(UpSampling2dLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("UpSampling2dLayer %s: is_scale:%s size:%s method:%d align_corners:%s" % (name, is_scale, size, method, align_corners)) self.inputs = prev_layer.outputs @@ -583,8 +563,6 @@ def __init__( else: raise Exception("Donot support shape %s" % self.inputs.get_shape()) - logging.info("UpSampling2dLayer %s: is_scale:%s size:%s method:%d align_corners:%s" % (name, is_scale, size, method, align_corners)) - with tf.variable_scope(name): try: self.outputs = tf.image.resize_images(self.inputs, size=size, method=method, align_corners=align_corners) @@ -602,7 +580,7 @@ class DownSampling2dLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer with 4-D Tensor in the shape of (batch, height, width, channels) or 3-D Tensor in the shape of (height, width, channels). size : tuple of int/float (height, width) scale factor or new size of height and width. 
@@ -621,20 +599,18 @@ class DownSampling2dLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, size=list(), is_scale=True, method=0, align_corners=False, name='downsample2d_layer', ): - # super(DownSampling2dLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(DownSampling2dLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(DownSampling2dLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("DownSampling2dLayer %s: is_scale:%s size:%s method:%d, align_corners:%s" % (name, is_scale, size, method, align_corners)) self.inputs = prev_layer.outputs @@ -653,8 +629,6 @@ def __init__( else: raise Exception("Do not support shape %s" % self.inputs.get_shape()) - logging.info("DownSampling2dLayer %s: is_scale:%s size:%s method:%d, align_corners:%s" % (name, is_scale, size, method, align_corners)) - with tf.variable_scope(name): try: self.outputs = tf.image.resize_images(self.inputs, size=size, method=method, align_corners=align_corners) @@ -673,7 +647,7 @@ class DeformableConv2d(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. offset_layer : :class:`Layer` To predict the offset of convolution operations. @@ -715,10 +689,10 @@ class DeformableConv2d(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, offset_layer=None, # shape=(3, 3, 1, 100), n_filter=32, @@ -729,6 +703,7 @@ def __init__( b_init=tf.constant_initializer(value=0.0), W_init_args=None, b_init_args=None): + if tf.__version__ < "1.4": raise Exception("Deformable CNN layer requires tensrflow 1.4 or higher version | current version %s" % tf.__version__) @@ -866,17 +841,14 @@ def _tf_batch_map_offsets(inputs, offsets, grid_offset): return mapped_vals - # super(DeformableConv2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(DeformableConv2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(DeformableConv2d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("DeformableConv2d %s: n_filter: %d, filter_size: %s act:%s" % (name, n_filter, str(filter_size), act.__name__)) self.inputs = prev_layer.outputs self.offset_layer = offset_layer if act is None: act = tf.identity - logging.info("DeformableConv2d %s: n_filter: %d, filter_size: %s act:%s" % (self.name, n_filter, str(filter_size), act.__name__)) try: pre_channel = int(prev_layer.outputs.get_shape()[-1]) @@ -950,8 +922,9 @@ def _tf_batch_map_offsets(inputs, offsets, grid_offset): self.all_params.append(W) +@deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def atrous_conv1d( - layer, + prev_layer, n_filter=32, filter_size=2, stride=1, @@ -969,7 +942,7 @@ def atrous_conv1d( Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. n_filter : int The number of filters. 
@@ -1009,9 +982,9 @@ def atrous_conv1d( b_init_args = {} return Conv1dLayer( - prev_layer=layer, + prev_layer=prev_layer, act=act, - shape=(filter_size, int(layer.outputs.get_shape()[-1]), n_filter), + shape=(filter_size, int(prev_layer.outputs.get_shape()[-1]), n_filter), stride=stride, padding=padding, dilation_rate=dilation, @@ -1030,7 +1003,7 @@ class AtrousConv2dLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer with a 4D output tensor in the shape of (batch, height, width, channels). n_filter : int The number of filters. @@ -1057,25 +1030,22 @@ class AtrousConv2dLayer(Layer): """ - def __init__( - self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release - n_filter=32, - filter_size=(3, 3), - rate=2, - act=tf.identity, - padding='SAME', - W_init=tf.truncated_normal_initializer(stddev=0.02), - b_init=tf.constant_initializer(value=0.0), - W_init_args=None, - b_init_args=None, - name='atrou2d'): - - # super(AtrousConv2dLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(AtrousConv2dLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, + prev_layer, + n_filter=32, + filter_size=(3, 3), + rate=2, + act=tf.identity, + padding='SAME', + W_init=tf.truncated_normal_initializer(stddev=0.02), + b_init=tf.constant_initializer(value=0.0), + W_init_args=None, + b_init_args=None, + name='atrou2d'): + + super(AtrousConv2dLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("AtrousConv2dLayer %s: n_filter:%d filter_size:%s rate:%d pad:%s act:%s" % (name, n_filter, filter_size, rate, padding, act.__name__)) self.inputs = prev_layer.outputs @@ -1086,8 +1056,6 @@ def __init__( if act is None: act = tf.identity - logging.info("AtrousConv2dLayer %s: n_filter:%d filter_size:%s rate:%d pad:%s act:%s" % (self.name, n_filter, filter_size, rate, padding, act.__name__)) - with tf.variable_scope(name): shape = [filter_size[0], filter_size[1], int(self.inputs.get_shape()[-1]), n_filter] filters = tf.get_variable(name='filter', shape=shape, initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) @@ -1114,7 +1082,7 @@ class _SeparableConv2dLayer(Layer): # TODO Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer with a 4D output tensor in the shape of [batch, height, width, channels]. n_filter : int The number of filters. 
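The same alias handling covers functional wrappers such as atrous_conv1d above, not just Layer subclasses, and this appears to be what the try/except around func_name in the PATCH 18 decorator is for: in a method call, args[0] is the instance and the warning is prefixed with its class name, while in a keyword-only call like atrous_conv1d(layer=net, ...), args is empty, args[0] raises IndexError, and the bare function name is used instead. Assumed call-site behaviour until the 1.9 removal (net is any previously built layer; the tl.layers.atrous_conv1d import path is illustrative):

    net = tl.layers.atrous_conv1d(layer=net, n_filter=32, filter_size=2)  # warns, keyword remapped
    net = tl.layers.atrous_conv1d(net, n_filter=32, filter_size=2)        # preferred spelling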
@@ -1161,32 +1129,30 @@ class _SeparableConv2dLayer(Layer): # TODO """ - def __init__( - self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release - n_filter=16, - filter_size=5, - strides=(1, 1), - padding='valid', - data_format='channels_last', - dilation_rate=(1, 1), - depth_multiplier=1, - act=tf.identity, - use_bias=True, - depthwise_initializer=None, - pointwise_initializer=None, - bias_initializer=tf.zeros_initializer, - depthwise_regularizer=None, - pointwise_regularizer=None, - bias_regularizer=None, - activity_regularizer=None, - name='atrou2d'): - - # super(_SeparableConv2dLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(_SeparableConv2dLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, + prev_layer, + n_filter=16, + filter_size=5, + strides=(1, 1), + padding='valid', + data_format='channels_last', + dilation_rate=(1, 1), + depth_multiplier=1, + act=tf.identity, + use_bias=True, + depthwise_initializer=None, + pointwise_initializer=None, + bias_initializer=tf.zeros_initializer, + depthwise_regularizer=None, + pointwise_regularizer=None, + bias_regularizer=None, + activity_regularizer=None, + name='atrou2d'): + + super(_SeparableConv2dLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("SeparableConv2dLayer %s: n_filter:%d filter_size:%s strides:%s padding:%s dilation_rate:%s depth_multiplier:%s act:%s" % + (name, n_filter, filter_size, str(strides), padding, str(dilation_rate), str(depth_multiplier), act.__name__)) self.inputs = prev_layer.outputs @@ -1195,9 +1161,6 @@ def __init__( bias_initializer = bias_initializer() - logging.info("SeparableConv2dLayer %s: n_filter:%d filter_size:%s strides:%s padding:%s dilation_rate:%s depth_multiplier:%s act:%s" % - (self.name, n_filter, filter_size, str(strides), padding, str(dilation_rate), str(depth_multiplier), act.__name__)) - with tf.variable_scope(name) as vs: self.outputs = tf.layers.separable_conv2d( self.inputs, @@ -1295,8 +1258,9 @@ def deconv2d_bilinear_upsampling_initializer(shape): return bilinear_weights_init +@deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def conv1d( - layer, + prev_layer, n_filter=32, filter_size=5, stride=1, @@ -1314,7 +1278,7 @@ def conv1d( Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer n_filter : int The number of filters @@ -1371,9 +1335,9 @@ def conv1d( b_init_args = {} return Conv1dLayer( - prev_layer=layer, + prev_layer=prev_layer, act=act, - shape=(filter_size, int(layer.outputs.get_shape()[-1]), n_filter), + shape=(filter_size, int(prev_layer.outputs.get_shape()[-1]), n_filter), stride=stride, dilation_rate=dilation_rate, padding=padding, @@ -1389,13 +1353,12 @@ def conv1d( # TODO: DeConv1d -# def conv2d( class Conv2d(Layer): """Simplified version of :class:`Conv2dLayer`. Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. n_filter : int The number of filters. 
@@ -1441,10 +1404,10 @@ class Conv2d(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, n_filter=32, filter_size=(3, 3), strides=(1, 1), @@ -1485,10 +1448,7 @@ def __init__( # data_format=data_format, # name=name) - # super(Conv2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(Conv2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(Conv2d, self).__init__(prev_layer=prev_layer, name=name) self.inputs = prev_layer.outputs @@ -1558,9 +1518,9 @@ def __init__( self.all_params.append(W) -def deconv2d(layer, - n_filter=32, - n_out_channel=None, +@deprecated_alias(layer='prev_layer', n_out_channel='n_filter', end_support_version=1.9) # TODO remove this line for the 1.9 release +def deconv2d(prev_layer, + n_filter, filter_size=(3, 3), out_size=(30, 30), strides=(2, 2), @@ -1576,7 +1536,7 @@ def deconv2d(layer, Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. n_filter : int The number of filters. @@ -1610,30 +1570,27 @@ def deconv2d(layer, A :class:`DeConv2dLayer` object. """ + + logging.info("DeConv2d %s: n_filters:%s strides:%s pad:%s act:%s" % (name, str(n_filter), str(strides), padding, act.__name__)) + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} if act is None: act = tf.identity - if n_out_channel is not None: - warnings.warn("deprecated", DeprecationWarning) - logging.warning( - "DeprecationWarning: `n_out_channel` argument in tl.layers.DeConv2d is deprecated and will be removed in 1.9, please change for `n_filter`") - n_filter = n_out_channel if len(strides) != 2: raise ValueError("len(strides) should be 2, DeConv2d and DeConv2dLayer are different.") if tf.__version__ > '1.3': - logging.info("DeConv2d %s: n_filters:%s strides:%s pad:%s act:%s" % (name, str(n_filter), str(strides), padding, act.__name__)) - inputs = layer.outputs + inputs = prev_layer.outputs scope_name = tf.get_variable_scope().name # if scope_name: # whole_name = scope_name + '/' + name # else: # whole_name = name - net_new = Layer(name=name) #whole_name) + net_new = Layer(prev_layer=None, name=name) # with tf.name_scope(name): with tf.variable_scope(name) as vs: net_new.outputs = tf.contrib.layers.conv2d_transpose( @@ -1647,25 +1604,25 @@ def deconv2d(layer, biases_initializer=b_init, scope=name) new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) - net_new.all_layers = list(layer.all_layers) - net_new.all_params = list(layer.all_params) - net_new.all_drop = dict(layer.all_drop) + net_new.all_layers = list(prev_layer.all_layers) + net_new.all_params = list(prev_layer.all_params) + net_new.all_drop = dict(prev_layer.all_drop) net_new.all_layers.extend([net_new.outputs]) net_new.all_params.extend(new_variables) return net_new else: if batch_size is None: # batch_size = tf.shape(net.outputs)[0] - fixed_batch_size = layer.outputs.get_shape().with_rank_at_least(1)[0] + fixed_batch_size = prev_layer.outputs.get_shape().with_rank_at_least(1)[0] if fixed_batch_size.value: batch_size = fixed_batch_size.value else: from tensorflow.python.ops import array_ops - batch_size = array_ops.shape(layer.outputs)[0] + batch_size = array_ops.shape(prev_layer.outputs)[0] return DeConv2dLayer( - prev_layer=layer, + 
prev_layer=prev_layer, act=act, - shape=(filter_size[0], filter_size[1], n_filter, int(layer.outputs.get_shape()[-1])), + shape=(filter_size[0], filter_size[1], n_filter, int(prev_layer.outputs.get_shape()[-1])), output_shape=(batch_size, int(out_size[0]), int(out_size[1]), n_filter), strides=(1, strides[0], strides[1], 1), padding=padding, @@ -1681,7 +1638,7 @@ class DeConv3d(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. n_filter : int The number of filters. @@ -1702,28 +1659,23 @@ class DeConv3d(Layer): """ - def __init__( - self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release - n_filter=32, - filter_size=(3, 3, 3), - strides=(2, 2, 2), - padding='SAME', - act=tf.identity, - W_init=tf.truncated_normal_initializer(stddev=0.02), - b_init=tf.constant_initializer(value=0.0), - name='decnn3d'): - - # super(DeConv3d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(DeConv3d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, + prev_layer, + n_filter=32, + filter_size=(3, 3, 3), + strides=(2, 2, 2), + padding='SAME', + act=tf.identity, + W_init=tf.truncated_normal_initializer(stddev=0.02), + b_init=tf.constant_initializer(value=0.0), + name='decnn3d'): + + super(DeConv3d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("DeConv3d %s: n_filters:%s strides:%s pad:%s act:%s" % (name, str(n_filter), str(strides), padding, act.__name__)) self.inputs = prev_layer.outputs - logging.info("DeConv3d %s: n_filters:%s strides:%s pad:%s act:%s" % (name, str(n_filter), str(strides), padding, act.__name__)) - with tf.variable_scope(name) as vs: self.outputs = tf.contrib.layers.conv3d_transpose( inputs=self.inputs, @@ -1755,7 +1707,7 @@ class DepthwiseConv2d(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. filter_size : tuple of int The filter size (height, width). 
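deconv2d above is the one signature where two renames are folded into a single decorator call: the series-wide layer to prev_layer migration, plus the older n_out_channel to n_filter rename previously handled by the in-function warning block deleted just above. Because the decorator rewrites the keyword before the function body runs, a legacy call supplying only n_out_channel still satisfies the now-mandatory n_filter parameter. Assumed call sites (shapes and the net variable are illustrative; DeConv2d is the public name quoted in the removed warning text):

    # both legacy spellings warn and are remapped until 1.9:
    #   net = tl.layers.DeConv2d(layer=net, n_out_channel=32, filter_size=(3, 3))
    # preferred after this patch:
    #   net = tl.layers.DeConv2d(net, n_filter=32, filter_size=(3, 3))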
@@ -1803,10 +1755,10 @@ class DepthwiseConv2d(Layer): """ # # https://zhuanlan.zhihu.com/p/31551004 https://github.com/xiaohu2015/DeepLearning_tutorials/blob/master/CNNs/MobileNet.py + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, shape=(3, 3), strides=(1, 1), act=tf.identity, @@ -1819,10 +1771,8 @@ def __init__( b_init_args=None, name='depthwise_conv2d', ): - # super(DepthwiseConv2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(DepthwiseConv2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(DepthwiseConv2d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("DepthwiseConv2d %s: shape:%s strides:%s pad:%s act:%s" % (name, str(shape), str(strides), padding, act.__name__)) self.inputs = prev_layer.outputs @@ -1833,8 +1783,6 @@ def __init__( if act is None: act = tf.identity - logging.info("DepthwiseConv2d %s: shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(strides), padding, act.__name__)) - try: pre_channel = int(prev_layer.outputs.get_shape()[-1]) except Exception: # if pre_channel is ?, it happens when using Spatial Transformer Net @@ -1877,7 +1825,7 @@ class SeparableConv2d(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. n_filter : int The dimensionality of the output space (i.e. the number of filters in the convolution). @@ -1904,10 +1852,10 @@ class SeparableConv2d(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, n_filter=100, filter_size=(3, 3), strides=(1, 1), @@ -1938,17 +1886,13 @@ def __init__( # if b_init_args is None: # b_init_args = {} - # super(SeparableConv2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(SeparableConv2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(SeparableConv2d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("SeparableConv2d %s: n_filter:%d filter_size:%s filter_size:%s depth_multiplier:%d act:%s" % (self.name, n_filter, str(filter_size), + str(strides), depth_multiplier, + act.__name__)) self.inputs = prev_layer.outputs - # print(self.name, n_filter, str(filter_size), str(strides), depth_multiplier, act.__name__) - logging.info("SeparableConv2d %s: n_filter:%d filter_size:%s filter_size:%s depth_multiplier:%d act:%s" \ - % (self.name, n_filter, str(filter_size), str(strides), depth_multiplier, act.__name__)) - with tf.variable_scope(name) as vs: self.outputs = tf.layers.separable_conv2d( inputs=self.inputs, @@ -1984,7 +1928,7 @@ class GroupConv2d(Layer): Parameters -------------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. n_filter : int The number of filters. @@ -2010,10 +1954,10 @@ class GroupConv2d(Layer): A unique layer name. 
""" + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, n_filter=32, filter_size=(3, 3), strides=(2, 2), @@ -2027,10 +1971,9 @@ def __init__( name='groupconv', ): # Windaway - # super(GroupConv2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(GroupConv2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(GroupConv2d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("GroupConv2d %s: n_filter:%d size:%s strides:%s n_group:%d pad:%s act:%s" % (name, n_filter, str(filter_size), str(strides), n_group, + padding, act.__name__)) self.inputs = prev_layer.outputs @@ -2042,8 +1985,6 @@ def __init__( groupConv = lambda i, k: tf.nn.conv2d(i, k, strides=[1, strides[0], strides[1], 1], padding=padding) channels = int(self.inputs.get_shape()[-1]) - logging.info("GroupConv2d %s: n_filter:%d size:%s strides:%s n_group:%d pad:%s act:%s" % (self.name, n_filter, str(filter_size), str(strides), n_group, - padding, act.__name__)) with tf.variable_scope(name): We = tf.get_variable( name='W', diff --git a/tensorlayer/layers/core.py b/tensorlayer/layers/core.py index ae78583a1..01d740f96 100644 --- a/tensorlayer/layers/core.py +++ b/tensorlayer/layers/core.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- import time -import warnings import numpy as np import tensorflow as tf @@ -10,6 +9,8 @@ from .. import _logging as logging from .. import files, iterate, utils, visualize +from ..deprecation import deprecated_alias + __all__ = [ 'LayersConfig', 'TF_GRAPHKEYS_VARIABLES', @@ -381,11 +382,14 @@ class Layer(object): """ - def __init__( - self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release - name=None): + inputs = None + outputs = None + all_layers = [] + all_params = [] + all_drop = {} + + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, name=None): if name is None: raise ValueError('Layer must have a name.') @@ -395,15 +399,6 @@ def __init__( name = scope_name + '/' + name self.name = name - # TODO remove this whole block for the 1.9 release - # ==== START Deprecation warning for layer ===== - if layer is not None: - warnings.warn("deprecated", DeprecationWarning) - logging.warning("DeprecationWarning: `layer` argument in %s.%s is deprecated and will be removed in 1.9, please change for `prev_layer`" % - (self.__module__, self.__class__.__name__)) - prev_layer = layer - # ==== END Deprecation warning for layer ===== - # get all properties of previous layer(s) if isinstance(prev_layer, Layer): # 1. for normal layer have only 1 input i.e. 
DenseLayer # Hint : list(), dict() is pass by value (shallow), without them, @@ -468,7 +463,7 @@ def __str__(self): return " Last layer is: %s (%s) %s" % (self.__class__.__name__, self.name, self.outputs.get_shape().as_list()) def __getitem__(self, key): - net_new = Layer(name=self.name) + net_new = Layer(prev_layer=None, name=self.name) net_new.inputs = self.inputs net_new.outputs = self.outputs[key] @@ -507,8 +502,10 @@ class InputLayer(Layer): """ def __init__(self, inputs=None, name='input'): - Layer.__init__(self, name=name) + + super(InputLayer, self).__init__(prev_layer=None, name=name) logging.info("InputLayer %s: %s" % (self.name, inputs.get_shape())) + self.outputs = inputs self.all_layers = [] self.all_params = [] @@ -545,8 +542,10 @@ class OneHotInputLayer(Layer): """ def __init__(self, inputs=None, depth=None, on_value=None, off_value=None, axis=None, dtype=None, name='input'): - Layer.__init__(self, name=name) + + super(OneHotInputLayer, self).__init__(prev_layer=None, name=name) logging.info("OneHotInputLayer %s: %s" % (self.name, inputs.get_shape())) + # assert depth != None, "depth is not given" if depth is None: logging.info(" [*] depth == None the number of output units is undefined") @@ -667,10 +666,11 @@ def __init__( if nce_b_init_args is None: nce_b_init_args = {} - Layer.__init__(self, name=name) - self.inputs = inputs + super(Word2vecEmbeddingInputlayer, self).__init__(prev_layer=None, name=name) logging.info("Word2vecEmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size)) + self.inputs = inputs + # Look up embeddings for inputs. # Note: a row of 'embeddings' is the vector representation of a word. # for the sake of speed, it is better to slice the embedding matrix @@ -758,10 +758,11 @@ def __init__( if E_init_args is None: E_init_args = {} - Layer.__init__(self, name=name) - self.inputs = inputs + super(EmbeddingInputlayer, self).__init__(prev_layer=None, name=name) logging.info("EmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size)) + self.inputs = inputs + with tf.variable_scope(name): embeddings = tf.get_variable( name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init, dtype=LayersConfig.tf_dtype, **E_init_args) @@ -821,8 +822,9 @@ def __init__( embeddings_kwargs=None, name='average_embedding', ): - # super().__init__(name=name) # dont work for py2 - Layer.__init__(self, name=name) + + super(AverageEmbeddingInputlayer, self).__init__(prev_layer=None, name=name) + logging.info("AverageEmbeddingInputlayer %s: (%d, %d)" % (name, vocabulary_size, embedding_size)) # if embeddings_kwargs is None: # embeddings_kwargs = {} @@ -832,7 +834,6 @@ def __init__( self.inputs = inputs - logging.info("AverageEmbeddingInputlayer %s: (%d, %d)" % (name, vocabulary_size, embedding_size)) with tf.variable_scope(name): self.embeddings = tf.get_variable( name='embeddings', @@ -883,7 +884,7 @@ class DenseLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. n_units : int The number of units of this layer. 
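The core.py hunks above settle the construction pattern that every layer in this patch now repeats: the alias decorator on __init__, an immediate super().__init__ call (passing prev_layer=None for input-style layers that have no predecessor), logging straight after it using the local name, and only then the inputs/outputs wiring. As a skeleton (MyLayer is illustrative; Layer, logging, tf and the decorator are the names already available inside tensorlayer/layers/core.py):

    class MyLayer(Layer):
        @deprecated_alias(layer='prev_layer', end_support_version=1.9)
        def __init__(self, prev_layer, n_units=100, name='my_layer'):
            super(MyLayer, self).__init__(prev_layer=prev_layer, name=name)
            logging.info("MyLayer %s: %d" % (name, n_units))
            self.inputs = prev_layer.outputs
            self.outputs = tf.identity(self.inputs, name=name)  # stand-in computation
            self.all_layers.append(self.outputs)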
@@ -920,10 +921,10 @@ class DenseLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, n_units=100, act=tf.identity, W_init=tf.truncated_normal_initializer(stddev=0.1), @@ -932,10 +933,9 @@ def __init__( b_init_args=None, name='dense', ): - # super(DenseLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(DenseLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + + super(DenseLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("DenseLayer %s: %d %s" % (name, n_units, act.__name__)) self.inputs = prev_layer.outputs self.n_units = n_units @@ -950,8 +950,6 @@ def __init__( n_in = int(self.inputs.get_shape()[-1]) - logging.info("DenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__)) - with tf.variable_scope(name): W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) if b_init is not None: @@ -977,7 +975,7 @@ class ReconLayer(DenseLayer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. x_recon : placeholder or tensor The target for reconstruction. @@ -1016,19 +1014,18 @@ class ReconLayer(DenseLayer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, x_recon=None, n_units=784, act=tf.nn.softplus, name='recon', ): - # super(ReconLayer, self).__init__(prev_layer=prev_layer, name=name, n_units=n_units, act=act, name=name) # TODO replace the line below with this line for the 1.9 release - super(ReconLayer, self).__init__(prev_layer=prev_layer, layer=layer, n_units=n_units, act=act, name=name) + super(ReconLayer, self).__init__(prev_layer=prev_layer, n_units=n_units, act=act, name=name) - logging.info("%s is a ReconLayer" % self.name) + logging.info("ReconLayer %s" % self.name) # y : reconstruction outputs; train_params : parameters to train # Note that: train_params = [W_encoder, b_encoder, W_decoder, b_encoder] @@ -1172,7 +1169,7 @@ class DropoutLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. keep : float The keeping probability. 
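For callers, the practical effect on a layer like DenseLayer is that the three spellings below coexist during the transition; the test patches earlier in the series simply standardise on the explicit n_units form. Sketch (placeholder shape is illustrative):

    import tensorflow as tf
    import tensorlayer as tl

    x = tf.placeholder(tf.float32, shape=[None, 30])
    net = tl.layers.InputLayer(x, name='input')
    net = tl.layers.DenseLayer(net, n_units=10, name='d1')             # preferred
    net = tl.layers.DenseLayer(prev_layer=net, n_units=10, name='d2')  # explicit new keyword
    # tl.layers.DenseLayer(layer=net, n_units=10, name='d3')           # deprecated: warns until 1.9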
@@ -1223,20 +1220,18 @@ class DropoutLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, keep=0.5, is_fix=False, is_train=True, seed=None, name='dropout_layer', ): - # super(DropoutLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(DropoutLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(DropoutLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("DropoutLayer %s: keep:%f is_fix:%s" % (name, keep, is_fix)) if is_train is False: logging.info(" skip DropoutLayer") @@ -1246,7 +1241,6 @@ def __init__( # self.all_drop = dict(layer.all_drop) else: self.inputs = prev_layer.outputs - logging.info("DropoutLayer %s: keep:%f is_fix:%s" % (self.name, keep, is_fix)) # The name of placeholder for keep_prob is the same with the name # of the Layer. @@ -1288,7 +1282,7 @@ class GaussianNoiseLayer(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. mean : float The mean. Default is 0. @@ -1311,20 +1305,17 @@ class GaussianNoiseLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, mean=0.0, stddev=1.0, is_train=True, seed=None, name='gaussian_noise_layer', ): - # super(GaussianNoiseLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(GaussianNoiseLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(GaussianNoiseLayer, self).__init__(prev_layer=prev_layer, name=name) if is_train is False: logging.info(" skip GaussianNoiseLayer") @@ -1353,7 +1344,7 @@ class DropconnectDenseLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. keep : float The keeping probability. 
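DropoutLayer and the DropconnectDenseLayer that follows differ only in what the random mask is applied to: activations versus weights. In NumPy terms (the shapes are what matters; the inverted scaling by `1/keep` matches `tf.nn.dropout`):

    import numpy as np

    rng = np.random.RandomState(0)
    x, W = rng.rand(32, 100), rng.rand(100, 10)
    keep = 0.5

    activation_mask = rng.binomial(1, keep, size=x.shape)  # Dropout: mask the inputs
    weight_mask = rng.binomial(1, keep, size=W.shape)      # DropConnect: mask the weights

    y_dropout = (x * activation_mask / keep).dot(W)
    y_dropconnect = x.dot(W * weight_mask / keep)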
@@ -1389,10 +1380,10 @@ class DropconnectDenseLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, keep=0.5, n_units=100, act=tf.identity, @@ -1402,10 +1393,8 @@ def __init__( b_init_args=None, name='dropconnect_layer', ): - # super(DropconnectDenseLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(DropconnectDenseLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(DropconnectDenseLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("DropconnectDenseLayer %s: %d %s" % (name, n_units, act.__name__)) if W_init_args is None: W_init_args = {} @@ -1418,7 +1407,6 @@ def __init__( raise Exception("The input dimension must be rank 2") n_in = int(self.inputs.get_shape()[-1]) self.n_units = n_units - logging.info("DropconnectDenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__)) with tf.variable_scope(name): W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) diff --git a/tensorlayer/layers/extend.py b/tensorlayer/layers/extend.py index 3d348e52a..3a078f2c0 100644 --- a/tensorlayer/layers/extend.py +++ b/tensorlayer/layers/extend.py @@ -5,6 +5,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'ExpandDimsLayer', 'TileLayer', @@ -18,7 +20,7 @@ class ExpandDimsLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. axis : int The dimension index at which to expand the shape of input. @@ -33,21 +35,18 @@ class ExpandDimsLayer(Layer): ... [None, 100, 1] """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, axis=0, name='expand_dims', ): - # super(ExpandDimsLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(ExpandDimsLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(ExpandDimsLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("ExpandDimsLayer %s: axis:%d" % (name, axis)) self.inputs = prev_layer.outputs - logging.info("ExpandDimsLayer %s: axis:%d" % (self.name, axis)) with tf.variable_scope(name): try: # TF12 TF1.0 self.outputs = tf.expand_dims(self.inputs, axis=axis) @@ -67,7 +66,7 @@ class TileLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. multiples: tensor Must be one of the following types: int32, int64. @@ -85,21 +84,13 @@ class TileLayer(Layer): ... 
[None, 100, 3] """ - def __init__( - self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release - multiples=None, - name='tile', - ): - # super(TileLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(TileLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, multiples=None, name='tile'): + super(TileLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("TileLayer %s: multiples:%s" % (name, multiples)) self.inputs = prev_layer.outputs - logging.info("TileLayer %s: multiples:%s" % (self.name, multiples)) with tf.variable_scope(name): self.outputs = tf.tile(self.inputs, multiples=multiples) # self.all_layers = list(layer.all_layers) diff --git a/tensorlayer/layers/importer.py b/tensorlayer/layers/importer.py index 39c222010..f85bb17b9 100644 --- a/tensorlayer/layers/importer.py +++ b/tensorlayer/layers/importer.py @@ -6,6 +6,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'LambdaLayer', 'SlimNetsLayer', @@ -19,7 +21,7 @@ class LambdaLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. fn : function The function that applies to the outputs of previous layer. @@ -53,19 +55,17 @@ class LambdaLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, fn=None, fn_args=None, name='lambda_layer', ): - # super(LambdaLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(LambdaLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(LambdaLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("LambdaLayer %s" % name) self.inputs = prev_layer.outputs @@ -75,7 +75,6 @@ def __init__( assert prev_layer is not None assert fn is not None - logging.info("LambdaLayer %s" % self.name) with tf.variable_scope(name) as vs: self.outputs = fn(self.inputs, **fn_args) variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) @@ -95,7 +94,7 @@ class SlimNetsLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. slim_layer : a slim network function The network you want to stack onto, end with ``return net, end_points``. 
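LambdaLayer is what the KerasLayer and EstimatorLayer warnings below point users to: any tensor-in/tensor-out function can be spliced into a network. A short sketch under TensorFlow 1.x (the doubling function is illustrative):

    import tensorflow as tf
    import tensorlayer as tl

    x = tf.placeholder(tf.float32, shape=(None, 100))
    net = tl.layers.InputLayer(x, name='input')
    # fn receives the previous layer's output tensor and must return a tensor
    net = tl.layers.LambdaLayer(net, fn=lambda t: 2 * t, name='double')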
@@ -110,19 +109,17 @@ class SlimNetsLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release - slim_layer=None, + prev_layer, + slim_layer, slim_args=None, name='tfslim_layer', ): - # super(SlimNetsLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(SlimNetsLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(SlimNetsLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("SlimNetsLayer %s: %s" % (name, slim_layer.__name__)) self.inputs = prev_layer.outputs @@ -131,8 +128,6 @@ def __init__( if slim_args is None: slim_args = {} - logging.info("SlimNetsLayer %s: %s" % (self.name, slim_layer.__name__)) - # with tf.variable_scope(name) as vs: # net, end_points = slim_layer(self.inputs, **slim_args) # slim_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) @@ -168,7 +163,7 @@ class KerasLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer keras_layer : function A tensor in tensor out function for building model. @@ -179,19 +174,17 @@ class KerasLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release - keras_layer=None, + prev_layer, + keras_layer, keras_args=None, name='keras_layer', ): - # super(KerasLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(KerasLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(KerasLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("KerasLayer %s: %s" % (name, keras_layer)) self.inputs = prev_layer.outputs @@ -200,7 +193,6 @@ def __init__( if keras_args is None: keras_args = {} - logging.info("KerasLayer %s: %s" % (self.name, keras_layer)) logging.warning("This API will be removed, please use LambdaLayer instead.") with tf.variable_scope(name) as vs: @@ -221,7 +213,7 @@ class EstimatorLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer model_fn : function A tensor in tensor out function for building model. 
@@ -232,18 +224,16 @@ class EstimatorLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, model_fn=None, args=None, name='estimator_layer', ): - # super(EstimatorLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(EstimatorLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(EstimatorLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("EstimatorLayer %s: %s" % (name, model_fn)) self.inputs = prev_layer.outputs @@ -252,7 +242,6 @@ def __init__( if args is None: args = {} - logging.info("EstimatorLayer %s: %s" % (self.name, model_fn)) logging.warning("This API will be removed, please use LambdaLayer instead.") with tf.variable_scope(name) as vs: diff --git a/tensorlayer/layers/merge.py b/tensorlayer/layers/merge.py index 4da8ed604..66fe05f98 100644 --- a/tensorlayer/layers/merge.py +++ b/tensorlayer/layers/merge.py @@ -124,7 +124,6 @@ def __init__( ): super(ElementwiseLayer, self).__init__(prev_layer=layers, name=name) - logging.info("ElementwiseLayer %s: size:%s fn:%s" % (self.name, layers[0].outputs.get_shape(), combine_fn.__name__)) self.outputs = layers[0].outputs diff --git a/tensorlayer/layers/normalization.py b/tensorlayer/layers/normalization.py index f5b77e478..febc70270 100644 --- a/tensorlayer/layers/normalization.py +++ b/tensorlayer/layers/normalization.py @@ -5,6 +5,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'LocalResponseNormLayer', 'BatchNormLayer', @@ -21,7 +23,7 @@ class LocalResponseNormLayer(Layer): Parameters ----------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a 4D output shape. depth_radius : int Depth radius. 0-D. Half-width of the 1-D normalization window. @@ -36,25 +38,21 @@ class LocalResponseNormLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, depth_radius=None, bias=None, alpha=None, beta=None, name='lrn_layer', ): - # super(LocalResponseNormLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(LocalResponseNormLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(LocalResponseNormLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("LocalResponseNormLayer %s: depth_radius: %s, bias: %s, alpha: %s, beta: %s" % (name, str(depth_radius), str(bias), str(alpha), str(beta))) self.inputs = prev_layer.outputs - logging.info("LocalResponseNormLayer %s: depth_radius: %s, bias: %s, alpha: %s, beta: %s" % (self.name, str(depth_radius), str(bias), str(alpha), - str(beta))) with tf.variable_scope(name): self.outputs = tf.nn.lrn(self.inputs, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) @@ -71,7 +69,7 @@ class BatchNormLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. decay : float A decay factor for `ExponentialMovingAverage`. 
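The `decay` parameter described above feeds `tf.train.ExponentialMovingAverage`, whose per-step update is `shadow = decay * shadow + (1 - decay) * value`. A plain-Python illustration (the `ema_update` helper is only for demonstration) of how fast the moving statistics track a batch statistic:

    def ema_update(shadow, value, decay=0.9):
        return decay * shadow + (1.0 - decay) * value

    moving_mean = 0.0
    for batch_mean in (1.0, 1.0, 1.0):
        moving_mean = ema_update(moving_mean, batch_mean)
    print(round(moving_mean, 3))  # 0.271, a higher decay means slower, smoother tracking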
@@ -99,10 +97,10 @@ class BatchNormLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, decay=0.9, epsilon=0.00001, act=tf.identity, @@ -111,14 +109,11 @@ def __init__( gamma_init=tf.random_normal_initializer(mean=1.0, stddev=0.002), name='batchnorm_layer', ): - # super(BatchNormLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(BatchNormLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(BatchNormLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("BatchNormLayer %s: decay:%f epsilon:%f act:%s is_train:%s" % (name, decay, epsilon, act.__name__, is_train)) self.inputs = prev_layer.outputs - logging.info("BatchNormLayer %s: decay:%f epsilon:%f act:%s is_train:%s" % (self.name, decay, epsilon, act.__name__, is_train)) x_shape = self.inputs.get_shape() params_shape = x_shape[-1:] @@ -204,7 +199,7 @@ class InstanceNormLayer(Layer): Parameters ----------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. act : activation function. The activation function of this layer. @@ -215,6 +210,7 @@ class InstanceNormLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -222,10 +218,11 @@ def __init__( epsilon=1e-5, name='instan_norm', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs + super(InstanceNormLayer, self).__init__(prev_layer=prev_layer, name=name) logging.info("InstanceNormLayer %s: epsilon:%f act:%s" % (self.name, epsilon, act.__name__)) + self.inputs = prev_layer.outputs + with tf.variable_scope(name) as vs: mean, var = tf.nn.moments(self.inputs, [1, 2], keep_dims=True) scale = tf.get_variable( @@ -248,7 +245,7 @@ class LayerNormLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. act : activation function The activation function of this layer. 
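BatchNormLayer, InstanceNormLayer and LayerNormLayer differ mainly in which axes the moments are computed over. For an NHWC tensor, the `tf.nn.moments` axes correspond to (NumPy sketch):

    import numpy as np

    x = np.random.rand(8, 32, 32, 16)                # NHWC
    bn_mean = x.mean(axis=(0, 1, 2))                 # BatchNorm: one moment per channel
    in_mean = x.mean(axis=(1, 2), keepdims=True)     # InstanceNorm: per sample, per channel
    ln_mean = x.mean(axis=(1, 2, 3), keepdims=True)  # LayerNorm (begin_norm_axis=1): per sample
    assert bn_mean.shape == (16,)
    assert in_mean.shape == (8, 1, 1, 16)
    assert ln_mean.shape == (8, 1, 1, 1)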
@@ -257,30 +254,25 @@ class LayerNormLayer(Layer): """ - def __init__( - self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release - center=True, - scale=True, - act=tf.identity, - reuse=None, - variables_collections=None, - outputs_collections=None, - trainable=True, - begin_norm_axis=1, - begin_params_axis=-1, - name='layernorm'): - - # super(LayerNormLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(LayerNormLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, + prev_layer, + center=True, + scale=True, + act=tf.identity, + reuse=None, + variables_collections=None, + outputs_collections=None, + trainable=True, + begin_norm_axis=1, + begin_params_axis=-1, + name='layernorm'): + + super(LayerNormLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("LayerNormLayer %s: act:%s" % (name, act.__name__)) self.inputs = prev_layer.outputs - logging.info("LayerNormLayer %s: act:%s" % (self.name, act.__name__)) - if tf.__version__ < "1.3": # raise Exception("Please use TF 1.3+") with tf.variable_scope(name) as vs: diff --git a/tensorlayer/layers/object_detection.py b/tensorlayer/layers/object_detection.py index fdbf7637f..e9c8b7158 100644 --- a/tensorlayer/layers/object_detection.py +++ b/tensorlayer/layers/object_detection.py @@ -3,6 +3,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'ROIPoolingLayer', ] @@ -14,7 +16,7 @@ class ROIPoolingLayer(Layer): Parameters ----------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. rois : tuple of int Regions of interest in the format of (feature map index, upper left, bottom right). @@ -32,24 +34,20 @@ class ROIPoolingLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, rois=list(), pool_height=2, pool_width=2, name='roipooling_layer', ): - # super(ROIPoolingLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(ROIPoolingLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(ROIPoolingLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("ROIPoolingLayer %s: (%d, %d)" % (name, pool_height, pool_width)) self.inputs = prev_layer.outputs - logging.info("ROIPoolingLayer %s: (%d, %d)" % (self.name, pool_height, pool_width)) - try: from tensorlayer.third_party.roi_pooling.roi_pooling.roi_pooling_ops import roi_pooling except Exception as e: diff --git a/tensorlayer/layers/padding.py b/tensorlayer/layers/padding.py index 12a497581..c85bc98ee 100644 --- a/tensorlayer/layers/padding.py +++ b/tensorlayer/layers/padding.py @@ -5,6 +5,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'PadLayer', 'ZeroPad1d', @@ -19,7 +21,7 @@ class PadLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. padding : list of lists of 2 ints, or a Tensor of type int32. The int32 values to pad. 
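PadLayer hands `padding` straight to `tf.pad`: one `[before, after]` pair per input dimension. The NumPy equivalent for padding an NHWC image by one pixel of height and width:

    import numpy as np

    x = np.ones((1, 28, 28, 3))
    paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]    # batch, height, width, channel
    padded = np.pad(x, paddings, mode='constant')  # tf.pad(x, paddings, mode='CONSTANT')
    assert padded.shape == (1, 30, 30, 3)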
@@ -35,25 +37,22 @@ class PadLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, padding=None, mode='CONSTANT', name='pad_layer', ): - # super(PadLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(PadLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(PadLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("PadLayer %s: padding:%s mode:%s" % (name, list(padding), mode)) self.inputs = prev_layer.outputs if padding is None: raise Exception("padding should be a Tensor of type int32. see https://www.tensorflow.org/api_docs/python/tf/pad") - logging.info("PadLayer %s: padding:%s mode:%s" % (self.name, list(padding), mode)) self.outputs = tf.pad(self.inputs, paddings=padding, mode=mode, name=name) self.all_layers.append(self.outputs) @@ -64,7 +63,7 @@ class ZeroPad1d(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. padding : int, or tuple of 2 ints - If int, zeros to add at the beginning and end of the padding dimension (axis 1). @@ -74,22 +73,18 @@ class ZeroPad1d(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, padding=1, name='zeropad1d', ): - # super(ZeroPad1d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(ZeroPad1d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(ZeroPad1d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("ZeroPad1d %s: padding:%s" % (name, str(padding))) self.inputs = prev_layer.outputs - logging.info("ZeroPad1d %s: padding:%s" % (self.name, str(padding))) - assert isinstance(padding, (int, tuple, dict)) self.outputs = tf.keras.layers.ZeroPadding1D(padding=padding, name=name)(self.inputs) @@ -102,7 +97,7 @@ class ZeroPad2d(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. padding : int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. - If int, the same symmetric padding is applied to width and height. 
@@ -113,22 +108,18 @@ class ZeroPad2d(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, padding=1, name='zeropad2d', ): - # super(ZeroPad2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(ZeroPad2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(ZeroPad2d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("ZeroPad2d %s: padding:%s" % (name, str(padding))) self.inputs = prev_layer.outputs - logging.info("ZeroPad2d %s: padding:%s" % (self.name, str(padding))) - assert isinstance(padding, (int, tuple)) self.outputs = tf.keras.layers.ZeroPadding2D(padding=padding, name=name)(self.inputs) @@ -141,7 +132,7 @@ class ZeroPad3d(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. padding : int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. - If int, the same symmetric padding is applied to width and height. @@ -154,20 +145,14 @@ class ZeroPad3d(Layer): def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, padding=1, name='zeropad3d', ): - # super(ZeroPad3d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(ZeroPad3d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(ZeroPad3d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("ZeroPad3d %s: padding:%s" % (name, str(padding))) self.inputs = prev_layer.outputs - - logging.info("ZeroPad3d %s: padding:%s" % (self.name, str(padding))) - assert isinstance(padding, (int, tuple)) self.outputs = tf.keras.layers.ZeroPadding3D(padding=padding, name=name)(self.inputs) diff --git a/tensorlayer/layers/pooling.py b/tensorlayer/layers/pooling.py index 3442dc434..5bd5989dd 100644 --- a/tensorlayer/layers/pooling.py +++ b/tensorlayer/layers/pooling.py @@ -7,6 +7,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'PoolLayer', 'MaxPool1d', @@ -32,7 +34,7 @@ class PoolLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. ksize : tuple of int The size of the window for each dimension of the input tensor. 
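PoolLayer forwards `ksize` and `strides` in NHWC order, so the defaults `(1, 2, 2, 1)` halve height and width only. Under the default 'SAME' padding the output size is ceil(size / stride); the `same_out_dim` helper below is purely illustrative:

    def same_out_dim(size, stride):
        return -(-size // stride)      # ceil division without importing math

    # (8, 28, 28, 3) pooled with ksize/strides (1, 2, 2, 1) -> (8, 14, 14, 3)
    assert same_out_dim(28, 2) == 14
    assert same_out_dim(7, 2) == 4     # odd sizes round up under 'SAME'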
@@ -54,35 +56,32 @@ class PoolLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='SAME', pool=tf.nn.max_pool, name='pool_layer', ): - # super(PoolLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(PoolLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(PoolLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("PoolLayer %s: ksize:%s strides:%s padding:%s pool:%s" % (name, str(ksize), str(strides), padding, pool.__name__)) self.inputs = prev_layer.outputs - logging.info("PoolLayer %s: ksize:%s strides:%s padding:%s pool:%s" % (self.name, str(ksize), str(strides), padding, pool.__name__)) - self.outputs = pool(self.inputs, ksize=ksize, strides=strides, padding=padding, name=name) self.all_layers.append(self.outputs) -def maxpool1d(net, filter_size=3, strides=2, padding='valid', data_format='channels_last', name=None): +@deprecated_alias(net='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release +def maxpool1d(prev_layer, filter_size=3, strides=2, padding='valid', data_format='channels_last', name=None): """Max pooling for 1D signal [batch, length, channel]. Wrapper for `tf.layers.max_pooling1d `__ . Parameters ---------- - net : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 3 [batch, length, channel]. filter_size : tuple of int Pooling window size. @@ -105,20 +104,21 @@ def maxpool1d(net, filter_size=3, strides=2, padding='valid', data_format='chann """ logging.info("MaxPool1d %s: filter_size:%s strides:%s padding:%s" % (name, str(filter_size), str(strides), str(padding))) - outputs = tf.layers.max_pooling1d(net.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name) + outputs = tf.layers.max_pooling1d(prev_layer.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name) - net_new = copy.copy(net) + net_new = copy.copy(prev_layer) net_new.outputs = outputs net_new.all_layers.extend([outputs]) return net_new -def meanpool1d(net, filter_size=3, strides=2, padding='valid', data_format='channels_last', name=None): +@deprecated_alias(net='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release +def meanpool1d(prev_layer, filter_size=3, strides=2, padding='valid', data_format='channels_last', name=None): """Mean pooling for 1D signal [batch, length, channel]. Wrapper for `tf.layers.average_pooling1d `__ . Parameters ------------ - net : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 3 [batch, length, channel]. filter_size : tuple of int Pooling window size. 
@@ -141,20 +141,21 @@ def meanpool1d(net, filter_size=3, strides=2, padding='valid', data_format='chan """ logging.info("MeanPool1d %s: filter_size:%s strides:%s padding:%s" % (name, str(filter_size), str(strides), str(padding))) - outputs = tf.layers.average_pooling1d(net.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name) + outputs = tf.layers.average_pooling1d(prev_layer.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name) - net_new = copy.copy(net) + net_new = copy.copy(prev_layer) net_new.outputs = outputs net_new.all_layers.extend([outputs]) return net_new -def maxpool2d(net, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='maxpool'): +@deprecated_alias(net='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release +def maxpool2d(prev_layer, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='maxpool'): """Max pooling for 2D image [batch, height, width, channel]. Wrapper for :class:`PoolLayer`. Parameters ----------- - net : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 4 [batch, height, width, channel]. filter_size : tuple of int (height, width) for filter size. @@ -174,8 +175,8 @@ def maxpool2d(net, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='max if strides is None: strides = filter_size if tf.__version__ > '1.5': - outputs = tf.layers.max_pooling2d(net.outputs, filter_size, strides, padding=padding, data_format='channels_last', name=name) - net_new = copy.copy(net) + outputs = tf.layers.max_pooling2d(prev_layer.outputs, filter_size, strides, padding=padding, data_format='channels_last', name=name) + net_new = copy.copy(prev_layer) net_new.outputs = outputs net_new.all_layers.extend([outputs]) return net_new @@ -183,18 +184,19 @@ def maxpool2d(net, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='max assert len(strides) == 2, "len(strides) should be 2, MaxPool2d and PoolLayer are different." - net = PoolLayer( - net, ksize=[1, filter_size[0], filter_size[1], 1], strides=[1, strides[0], strides[1], 1], padding=padding, pool=tf.nn.max_pool, name=name) + prev_layer = PoolLayer( + prev_layer, ksize=[1, filter_size[0], filter_size[1], 1], strides=[1, strides[0], strides[1], 1], padding=padding, pool=tf.nn.max_pool, name=name) - return net + return prev_layer -def meanpool2d(net, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='meanpool'): +@deprecated_alias(net='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release +def meanpool2d(prev_layer, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='meanpool'): """Mean pooling for 2D image [batch, height, width, channel]. Wrapper for :class:`PoolLayer`. Parameters ----------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 4 [batch, height, width, channel]. filter_size : tuple of int (height, width) for filter size. 
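Note that checks like `tf.__version__ > '1.5'` above compare strings, and lexicographic order misranks versions such as '1.10'. A tuple-based helper (illustrative, not part of this diff) avoids the problem:

    def version_tuple(version):
        """'1.10.0' -> (1, 10, 0): compare versions numerically, not as text."""
        return tuple(int(p) for p in version.split('.')[:3] if p.isdigit())

    assert '1.10' < '1.5'                                # string order: wrong
    assert version_tuple('1.10') > version_tuple('1.5')  # tuple order: right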
@@ -214,8 +216,8 @@ def meanpool2d(net, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='me if strides is None: strides = filter_size if tf.__version__ > '1.5': - outputs = tf.layers.average_pooling2d(net.outputs, filter_size, strides, padding=padding, data_format='channels_last', name=name) - net_new = copy.copy(net) + outputs = tf.layers.average_pooling2d(prev_layer.outputs, filter_size, strides, padding=padding, data_format='channels_last', name=name) + net_new = copy.copy(prev_layer) net_new.outputs = outputs net_new.all_layers.extend([outputs]) return net_new @@ -223,9 +225,9 @@ def meanpool2d(net, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='me assert len(strides) == 2, "len(strides) should be 2, MeanPool2d and PoolLayer are different." - net = PoolLayer( - net, ksize=[1, filter_size[0], filter_size[1], 1], strides=[1, strides[0], strides[1], 1], padding=padding, pool=tf.nn.avg_pool, name=name) - return net + prev_layer = PoolLayer( + prev_layer, ksize=[1, filter_size[0], filter_size[1], 1], strides=[1, strides[0], strides[1], 1], padding=padding, pool=tf.nn.avg_pool, name=name) + return prev_layer # def maxpool3d(net, filter_size=(3, 3, 3), strides=(2, 2, 2), padding='valid', data_format='channels_last', name='maxpool3d'): @@ -234,7 +236,7 @@ class MaxPool3d(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 5 [batch, depth, height, width, channel]. filter_size : tuple of int Pooling window size. @@ -257,24 +259,14 @@ class MaxPool3d(Layer): """ - def __init__( - self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release - filter_size=(3, 3, 3), - strides=(2, 2, 2), - padding='valid', - data_format='channels_last', - name='maxpool3d'): - # super(MaxPool3d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(MaxPool3d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer - - self.inputs = prev_layer.outputs + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, filter_size=(3, 3, 3), strides=(2, 2, 2), padding='valid', data_format='channels_last', name='maxpool3d'): + super(MaxPool3d, self).__init__(prev_layer=prev_layer, name=name) logging.info("MaxPool3d %s: filter_size:%s strides:%s padding:%s" % (name, str(filter_size), str(strides), str(padding))) + self.inputs = prev_layer.outputs + self.outputs = tf.layers.max_pooling3d(prev_layer.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name) # update layer (customized) self.all_layers.append(self.outputs) @@ -286,7 +278,7 @@ class MeanPool3d(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 5 [batch, depth, height, width, channel]. filter_size : tuple of int Pooling window size. 
@@ -309,28 +301,17 @@ class MeanPool3d(Layer): """ - def __init__( - self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release - filter_size=(3, 3, 3), - strides=(2, 2, 2), - padding='valid', - data_format='channels_last', - name='meanpool3d'): - - # super(MeanPool3d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(MeanPool3d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer - - self.inputs = prev_layer.outputs + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, filter_size=(3, 3, 3), strides=(2, 2, 2), padding='valid', data_format='channels_last', name='meanpool3d'): - # print out info (customized) + super(MeanPool3d, self).__init__(prev_layer=prev_layer, name=name) logging.info("MeanPool3d %s: filter_size:%s strides:%s padding:%s" % (name, str(filter_size), str(strides), str(padding))) + self.inputs = prev_layer.outputs + # operation (customized) self.outputs = tf.layers.average_pooling3d(prev_layer.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name) + # update layer (customized) self.all_layers.append(self.outputs) @@ -340,7 +321,7 @@ class GlobalMaxPool1d(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 3 [batch, length, channel]. name : str A unique layer name. @@ -353,24 +334,16 @@ class GlobalMaxPool1d(Layer): ... [None, 30] """ - def __init__( - self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release - name='globalmaxpool1d', - ): - # super(GlobalMaxPool1d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(GlobalMaxPool1d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, name='globalmaxpool1d'): + super(GlobalMaxPool1d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("GlobalMaxPool1d %s" % name) self.inputs = prev_layer.outputs - # print out info (customized) - logging.info("GlobalMaxPool1d %s" % name) - # operation (customized) self.outputs = tf.reduce_max(prev_layer.outputs, axis=1, name=name) + # update layer (customized) self.all_layers.append(self.outputs) @@ -380,7 +353,7 @@ class GlobalMeanPool1d(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 3 [batch, length, channel]. name : str A unique layer name. @@ -393,24 +366,16 @@ class GlobalMeanPool1d(Layer): ... 
[None, 30] """ - def __init__( - self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release - name='globalmeanpool1d', - ): - # super(GlobalMeanPool1d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(GlobalMeanPool1d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, name='globalmeanpool1d'): + super(GlobalMeanPool1d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("GlobalMeanPool1d %s" % name) self.inputs = prev_layer.outputs - # print out info (customized) - logging.info("GlobalMeanPool1d %s" % name) - # operation (customized) self.outputs = tf.reduce_mean(prev_layer.outputs, axis=1, name=name) + # update layer (customized) self.all_layers.append(self.outputs) @@ -420,7 +385,7 @@ class GlobalMaxPool2d(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 4 [batch, height, width, channel]. name : str A unique layer name. @@ -433,24 +398,16 @@ class GlobalMaxPool2d(Layer): ... [None, 30] """ - def __init__( - self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release - name='globalmaxpool2d', - ): - # super(GlobalMaxPool2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(GlobalMaxPool2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, name='globalmaxpool2d'): + super(GlobalMaxPool2d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("GlobalMaxPool2d %s" % name) self.inputs = prev_layer.outputs - # print out info (customized) - logging.info("GlobalMaxPool2d %s" % name) - # operation (customized) self.outputs = tf.reduce_max(prev_layer.outputs, axis=[1, 2], name=name) + # update layer (customized) self.all_layers.append(self.outputs) @@ -460,7 +417,7 @@ class GlobalMeanPool2d(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 4 [batch, height, width, channel]. name : str A unique layer name. @@ -473,24 +430,16 @@ class GlobalMeanPool2d(Layer): ... 
[None, 30] """ - def __init__( - self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release - name='globalmeanpool2d', - ): - # super(GlobalMeanPool2d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(GlobalMeanPool2d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, name='globalmeanpool2d'): + super(GlobalMeanPool2d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("GlobalMeanPool2d %s" % name) self.inputs = prev_layer.outputs - # print out info (customized) - logging.info("GlobalMeanPool2d %s" % name) - # operation (customized) self.outputs = tf.reduce_mean(prev_layer.outputs, axis=[1, 2], name=name) + # update layer (customized) self.all_layers.append(self.outputs) @@ -500,7 +449,7 @@ class GlobalMaxPool3d(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 5 [batch, depth, height, width, channel]. name : str A unique layer name. @@ -513,16 +462,9 @@ class GlobalMaxPool3d(Layer): ... [None, 30] """ - def __init__( - self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release - name='globalmaxpool3d', - ): - # super(GlobalMaxPool3d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(GlobalMaxPool3d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, name='globalmaxpool3d'): + super(GlobalMaxPool3d, self).__init__(prev_layer=prev_layer, name=name) self.inputs = prev_layer.outputs @@ -531,6 +473,7 @@ def __init__( # operation (customized) self.outputs = tf.reduce_max(prev_layer.outputs, axis=[1, 2, 3], name=name) + # update layer (customized) self.all_layers.append(self.outputs) @@ -540,7 +483,7 @@ class GlobalMeanPool3d(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 5 [batch, depth, height, width, channel]. name : str A unique layer name. @@ -553,24 +496,16 @@ class GlobalMeanPool3d(Layer): ... 
[None, 30] """ - def __init__( - self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release - name='globalmeanpool3d', - ): - # super(GlobalMeanPool3d, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(GlobalMeanPool3d, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, name='globalmeanpool3d'): + super(GlobalMeanPool3d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("GlobalMeanPool3d %s" % name) self.inputs = prev_layer.outputs - # print out info (customized) - logging.info("GlobalMeanPool3d %s" % name) - # operation (customized) self.outputs = tf.reduce_mean(prev_layer.outputs, axis=[1, 2, 3], name=name) + # update layer (customized) self.all_layers.append(self.outputs) diff --git a/tensorlayer/layers/recurrent.py b/tensorlayer/layers/recurrent.py index 3b7e1c817..0134de72c 100644 --- a/tensorlayer/layers/recurrent.py +++ b/tensorlayer/layers/recurrent.py @@ -7,6 +7,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'RNNLayer', 'BiRNNLayer', @@ -31,7 +33,7 @@ class RNNLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. cell_fn : TensorFlow cell function A TensorFlow core RNN cell @@ -130,10 +132,10 @@ class RNNLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, cell_fn=None, cell_init_args=None, n_hidden=100, @@ -144,10 +146,7 @@ def __init__( return_seq_2d=False, name='rnn', ): - # super(RNNLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(RNNLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(RNNLayer, self).__init__(prev_layer=prev_layer, name=name) self.inputs = prev_layer.outputs @@ -259,7 +258,7 @@ class BiRNNLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. cell_fn : TensorFlow cell function A TensorFlow core RNN cell. 
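All of the Global*Pool layers above are a single reduction over the non-batch spatial axes, which is where the `[None, 30]` shapes in their docstrings come from. A quick NumPy check of the reductions the `tf.reduce_max`/`tf.reduce_mean` calls perform:

    import numpy as np

    assert np.zeros((8, 100, 30)).max(axis=1).shape == (8, 30)                 # GlobalMaxPool1d
    assert np.zeros((8, 20, 20, 30)).mean(axis=(1, 2)).shape == (8, 30)        # GlobalMeanPool2d
    assert np.zeros((8, 5, 20, 20, 30)).max(axis=(1, 2, 3)).shape == (8, 30)   # GlobalMaxPool3d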
@@ -321,10 +320,10 @@ class BiRNNLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, cell_fn=None, cell_init_args=None, n_hidden=100, @@ -338,10 +337,7 @@ def __init__( return_seq_2d=False, name='birnn', ): - # super(BiRNNLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(BiRNNLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(BiRNNLayer, self).__init__(prev_layer=prev_layer, name=name) self.inputs = prev_layer.outputs @@ -646,7 +642,7 @@ class ConvLSTMLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer cell_shape : tuple of int The shape of each cell width * height @@ -695,10 +691,10 @@ class ConvLSTMLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, cell_shape=None, feature_map=1, filter_size=(3, 3), @@ -710,10 +706,7 @@ def __init__( return_seq_2d=False, name='convlstm', ): - # super(ConvLSTMLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(ConvLSTMLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(ConvLSTMLayer, self).__init__(prev_layer=prev_layer, name=name) self.inputs = prev_layer.outputs @@ -936,7 +929,7 @@ class DynamicRNNLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer cell_fn : TensorFlow cell function A TensorFlow core RNN cell @@ -1030,10 +1023,10 @@ class DynamicRNNLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, cell_fn=None, #tf.nn.rnn_cell.LSTMCell, cell_init_args=None, n_hidden=256, @@ -1047,10 +1040,7 @@ def __init__( dynamic_rnn_init_args=None, name='dyrnn', ): - # super(DynamicRNNLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(DynamicRNNLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(DynamicRNNLayer, self).__init__(prev_layer=prev_layer, name=name) self.inputs = prev_layer.outputs @@ -1215,7 +1205,7 @@ class BiDynamicRNNLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. 
cell_fn : TensorFlow cell function A TensorFlow core RNN cell @@ -1288,10 +1278,10 @@ class BiDynamicRNNLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, cell_fn=None, #tf.nn.rnn_cell.LSTMCell, cell_init_args=None, n_hidden=256, @@ -1306,10 +1296,7 @@ def __init__( dynamic_rnn_init_args=None, name='bi_dyrnn_layer', ): - # super(BiDynamicRNNLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(BiDynamicRNNLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(BiDynamicRNNLayer, self).__init__(prev_layer=prev_layer, name=name) self.inputs = prev_layer.outputs @@ -1597,10 +1584,11 @@ def __init__( return_seq_2d=False, name='seq2seq', ): + super(Seq2Seq, self).__init__(prev_layer=None, name=name) + if cell_init_args is None: cell_init_args = {'state_is_tuple': True} - Layer.__init__(self, name=name) if cell_fn is None: raise Exception("Please put in cell_fn") if 'GRU' in cell_fn.__name__: diff --git a/tensorlayer/layers/shape.py b/tensorlayer/layers/shape.py index 5ae761ce0..aab4029de 100644 --- a/tensorlayer/layers/shape.py +++ b/tensorlayer/layers/shape.py @@ -5,6 +5,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'FlattenLayer', 'ReshapeLayer', @@ -20,7 +22,7 @@ class FlattenLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. name : str A unique layer name. @@ -34,31 +36,25 @@ class FlattenLayer(Layer): """ - def __init__( - self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release - name='flatten', - ): - # super(FlattenLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(FlattenLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, name='flatten'): + super(FlattenLayer, self).__init__(prev_layer=prev_layer, name=name) self.inputs = prev_layer.outputs self.outputs = flatten_reshape(self.inputs, name=name) self.n_units = int(self.outputs.get_shape()[-1]) - logging.info("FlattenLayer %s: %d" % (self.name, self.n_units)) self.all_layers.append(self.outputs) + logging.info("FlattenLayer %s: %d" % (self.name, self.n_units)) + class ReshapeLayer(Layer): """A layer that reshapes a given tensor. Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer shape : tuple of int The output shape, see ``tf.reshape``. 
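FlattenLayer's `n_units` is simply the product of all non-batch dimensions, as a quick NumPy check of what `flatten_reshape` computes shows:

    import numpy as np

    x = np.zeros((8, 7, 7, 64))
    flat = x.reshape(x.shape[0], -1)      # collapse everything but the batch axis
    assert flat.shape == (8, 7 * 7 * 64)  # n_units == 3136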
@@ -75,17 +71,9 @@ class ReshapeLayer(Layer): """ - def __init__( - self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release - shape=list(), - name='reshape', - ): - # super(ReshapeLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(ReshapeLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, shape=list(), name='reshape'): + super(ReshapeLayer, self).__init__(prev_layer=prev_layer, name=name) self.inputs = prev_layer.outputs @@ -105,7 +93,7 @@ class TransposeLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer perm: list of int The permutation of the dimensions, similar with ``numpy.transpose``. @@ -121,22 +109,15 @@ class TransposeLayer(Layer): """ - def __init__( - self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release - perm=None, - name='transpose'): + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, perm=None, name='transpose'): - # super(TransposeLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(TransposeLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(TransposeLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("TransposeLayer %s: perm:%s" % (name, perm)) self.inputs = prev_layer.outputs assert perm is not None - logging.info("TransposeLayer %s: perm:%s" % (self.name, perm)) self.outputs = tf.transpose(self.inputs, perm=perm, name=name) self.all_layers.append(self.outputs) diff --git a/tensorlayer/layers/spatial_transformer.py b/tensorlayer/layers/spatial_transformer.py index ed3e3fe02..512f5ab60 100644 --- a/tensorlayer/layers/spatial_transformer.py +++ b/tensorlayer/layers/spatial_transformer.py @@ -9,6 +9,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'transformer', 'batch_transformer', @@ -209,7 +211,7 @@ class SpatialTransformer2dAffineLayer(Layer): Parameters ----------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. theta_layer : :class:`Layer` The localisation network. 
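TransposeLayer passes `perm` directly to `tf.transpose`; the NumPy equivalent makes the axis reordering concrete:

    import numpy as np

    x = np.zeros((8, 28, 28, 3))          # NHWC
    x_t = np.transpose(x, (0, 3, 1, 2))   # perm=[0, 3, 1, 2]: NHWC -> NCHW
    assert x_t.shape == (8, 3, 28, 28)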
@@ -226,22 +228,14 @@ class SpatialTransformer2dAffineLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release - theta_layer=None, + prev_layer, + theta_layer, out_size=None, name='spatial_trans_2d_affine', ): - if layer is not None: - # TODO remove the whole block for the 1.9 release - warnings.warn("deprecated", DeprecationWarning) - logging.warning("DeprecationWarning: `layer` argument in %s.%s is deprecated and will be removed in 1.9, please change for `prev_layer`" % - (self.__module__, self.__class__.__name__)) - - if layer is not None: - prev_layer = layer super(SpatialTransformer2dAffineLayer, self).__init__(prev_layer=[prev_layer, theta_layer], name=name) diff --git a/tensorlayer/layers/special_activation.py b/tensorlayer/layers/special_activation.py index bc01af2e3..4d5a472dd 100644 --- a/tensorlayer/layers/special_activation.py +++ b/tensorlayer/layers/special_activation.py @@ -5,6 +5,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'PReluLayer', ] @@ -16,7 +18,7 @@ class PReluLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer。 channel_shared : boolean If True, single weight is shared by all channels. @@ -33,10 +35,10 @@ class PReluLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - layer=None, # TODO remove this line for the 1.9 release + prev_layer, channel_shared=False, a_init=tf.constant_initializer(value=0.0), a_init_args=None, @@ -46,14 +48,11 @@ def __init__( if a_init_args is None: a_init_args = {} - # super(PReluLayer, self).__init__(prev_layer=prev_layer, name=name) # TODO replace the 3 lines below with this line for the 1.9 release - super(PReluLayer, self).__init__(prev_layer=prev_layer, layer=layer, name=name) - if layer is not None: - prev_layer = layer + super(PReluLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("PReluLayer %s: channel_shared:%s" % (name, channel_shared)) self.inputs = prev_layer.outputs - logging.info("PReluLayer %s: channel_shared:%s" % (self.name, channel_shared)) if channel_shared: w_shape = (1, ) else: diff --git a/tensorlayer/layers/stack.py b/tensorlayer/layers/stack.py index 654eb0537..dd9bde8be 100644 --- a/tensorlayer/layers/stack.py +++ b/tensorlayer/layers/stack.py @@ -5,6 +5,8 @@ from .. 
import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'StackLayer', 'UnStackLayer', @@ -42,15 +44,16 @@ def __init__( axis=1, name='stack', ): - Layer.__init__(self, prev_layer=layers, name=name) + + super(StackLayer, self).__init__(prev_layer=layers, name=name) + logging.info("StackLayer %s: axis: %d" % (name, axis)) + self.inputs = [] for l in layers: self.inputs.append(l.outputs) self.outputs = tf.stack(self.inputs, axis=axis, name=name) - logging.info("StackLayer %s: axis: %d" % (self.name, axis)) - # self.all_layers = list(layers[0].all_layers) # self.all_params = list(layers[0].all_params) # self.all_drop = dict(layers[0].all_drop) @@ -66,13 +69,14 @@ def __init__( self.all_layers.append(self.outputs) -def unstack_layer(layer, num=None, axis=0, name='unstack'): +@deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release +def unstack_layer(prev_layer, num=None, axis=0, name='unstack'): """ It is layer for unstacking the given dimension of a rank-R tensor into rank-(R-1) tensors., see `tf.unstack() `__. Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer num : int or None The length of the dimension axis. Automatically inferred if None (the default). @@ -87,7 +91,7 @@ def unstack_layer(layer, num=None, axis=0, name='unstack'): The list of layer objects unstacked from the input. """ - inputs = layer.outputs + inputs = prev_layer.outputs with tf.variable_scope(name): outputs = tf.unstack(inputs, num=num, axis=axis) @@ -101,7 +105,7 @@ def unstack_layer(layer, num=None, axis=0, name='unstack'): full_name = name for i, _v in enumerate(outputs): - n = Layer(prev_layer=layer, name=full_name + str(i)) + n = Layer(prev_layer=prev_layer, name=full_name + str(i)) n.outputs = outputs[i] # n.all_layers = list(layer.all_layers) # n.all_params = list(layer.all_params) diff --git a/tensorlayer/layers/super_resolution.py b/tensorlayer/layers/super_resolution.py index d82cfd4a5..f00a6836b 100644 --- a/tensorlayer/layers/super_resolution.py +++ b/tensorlayer/layers/super_resolution.py @@ -5,19 +5,22 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'SubpixelConv1d', 'SubpixelConv2d', ] -def subpixel_conv2d(net, scale=2, n_out_channel=None, act=tf.identity, name='subpixel_conv2d'): +@deprecated_alias(net='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release +def subpixel_conv2d(prev_layer, scale=2, n_out_channel=None, act=tf.identity, name='subpixel_conv2d'): """It is a 2D sub-pixel up-sampling layer, usually be used for Super-Resolution applications, see `SRGAN `__ for example. Parameters ------------ - net : :class:`Layer` + prev_layer : :class:`Layer` Previous layer, scale : int The up-scaling ratio, a wrong setting will lead to dimension size error. 
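StackLayer and `unstack_layer` above mirror `tf.stack` and `tf.unstack`; the round trip in NumPy terms (split plus squeeze standing in for unstack):

    import numpy as np

    a, b, c = (np.zeros((8, 30)) for _ in range(3))
    stacked = np.stack([a, b, c], axis=1)   # StackLayer(axis=1) -> (8, 3, 30)
    assert stacked.shape == (8, 3, 30)

    parts = [np.squeeze(p, axis=1) for p in np.split(stacked, 3, axis=1)]
    assert parts[0].shape == (8, 30)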
@@ -92,26 +95,27 @@ def _PS(X, r, n_out_channels): logging.info(_err_log) return X - inputs = net.outputs + inputs = prev_layer.outputs if n_out_channel is None: assert int(inputs.get_shape()[-1]) / (scale**2) % 1 == 0, _err_log n_out_channel = int(int(inputs.get_shape()[-1]) / (scale**2)) logging.info("SubpixelConv2d %s: scale: %d n_out_channel: %s act: %s" % (name, scale, n_out_channel, act.__name__)) - net_new = Layer(prev_layer=net, name=name) #whole_name) + net_new = Layer(prev_layer=prev_layer, name=name) # with tf.name_scope(name): with tf.variable_scope(name): net_new.outputs = act(_PS(inputs, r=scale, n_out_channels=n_out_channel)) - # net_new.all_layers = list(net.all_layers) - # net_new.all_params = list(net.all_params) - # net_new.all_drop = dict(net.all_drop) + # net_new.all_layers = list(prev_layer.all_layers) + # net_new.all_params = list(prev_layer.all_params) + # net_new.all_drop = dict(prev_layer.all_drop) net_new.all_layers.append(net_new.outputs) return net_new -def subpixel_conv1d(net, scale=2, act=tf.identity, name='subpixel_conv1d'): +@deprecated_alias(net='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release +def subpixel_conv1d(prev_layer, scale=2, act=tf.identity, name='subpixel_conv1d'): """It is a 1D sub-pixel up-sampling layer. Calls a TensorFlow function that directly implements this functionality. @@ -155,14 +159,14 @@ def _PS(I, r): logging.info("SubpixelConv1d %s: scale: %d act: %s" % (name, scale, act.__name__)) - inputs = net.outputs - net_new = Layer(prev_layer=net, name=name) + inputs = prev_layer.outputs + net_new = Layer(prev_layer=prev_layer, name=name) with tf.name_scope(name): net_new.outputs = act(_PS(inputs, r=scale)) - # net_new.all_layers = list(net.all_layers) - # net_new.all_params = list(net.all_params) - # net_new.all_drop = dict(net.all_drop) + # net_new.all_layers = list(prev_layer.all_layers) + # net_new.all_params = list(prev_layer.all_params) + # net_new.all_drop = dict(prev_layer.all_drop) net_new.all_layers.append(net_new.outputs) return net_new diff --git a/tensorlayer/layers/time_distribution.py b/tensorlayer/layers/time_distribution.py index 39a9d2193..fcec358e1 100644 --- a/tensorlayer/layers/time_distribution.py +++ b/tensorlayer/layers/time_distribution.py @@ -5,6 +5,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'TimeDistributedLayer', ] @@ -18,7 +20,7 @@ class TimeDistributedLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer with output size of (batch_size, length, dim). layer_class : a :class:`Layer` class The layer class name. 
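`TimeDistributedLayer`, modified in the surrounding hunks, applies one layer class with shared weights at every time step of a rank-3 input. A hedged usage sketch (the placeholder shape and layer names are illustrative, not taken from this patch):

import tensorflow as tf
import tensorlayer as tl

# Input of shape (batch_size, length, dim); the same DenseLayer weights
# are applied independently at each of the 20 time steps.
x = tf.placeholder(tf.float32, [None, 20, 100], name='x')
net = tl.layers.InputLayer(x, name='input')
net = tl.layers.TimeDistributedLayer(net, layer_class=tl.layers.DenseLayer, args={'n_units': 50, 'name': 'dense'}, name='time_dense')
print(net.outputs)  # shape (?, 20, 50)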
@@ -46,6 +48,7 @@ class TimeDistributedLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -53,14 +56,15 @@ def __init__( args=None, name='time_distributed', ): + super(TimeDistributedLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("TimeDistributedLayer %s: layer_class:%s args:%s" % (self.name, layer_class.__name__, args)) + if args is None: args = {} if not isinstance(args, dict): raise TypeError("'args' must be a dict.") - Layer.__init__(self, prev_layer=prev_layer, name=name) self.inputs = prev_layer.outputs - logging.info("TimeDistributedLayer %s: layer_class:%s args:%s" % (self.name, layer_class.__name__, args)) if not isinstance(self.inputs, tf.Tensor): self.inputs = tf.transpose(tf.stack(self.inputs), [1, 0, 2]) From c80058eb11c1f2a63510f28c9b0c904c88e9c66a Mon Sep 17 00:00:00 2001 From: DEKHTIARJonathan Date: Fri, 13 Apr 2018 00:44:03 +0200 Subject: [PATCH 20/21] Codacy Issues Fix --- tensorlayer/layers/convolution.py | 10 ++++++---- tensorlayer/layers/object_detection.py | 2 +- tensorlayer/layers/padding.py | 10 +++++++--- tensorlayer/layers/shape.py | 2 +- tensorlayer/layers/spatial_transformer.py | 2 -- 5 files changed, 15 insertions(+), 11 deletions(-) diff --git a/tensorlayer/layers/convolution.py index 3957feefb..5eecc1b6c 100644 --- a/tensorlayer/layers/convolution.py +++ b/tensorlayer/layers/convolution.py @@ -535,7 +535,7 @@ class UpSampling2dLayer(Layer): def __init__( self, prev_layer, - size=list(), + size, is_scale=True, method=0, align_corners=False, @@ -546,7 +546,8 @@ def __init__( self.inputs = prev_layer.outputs - assert isinstance(size, (list, tuple)) and len(size) == 2 + if not (isinstance(size, (list, tuple)) and len(size) == 2): + raise AssertionError() if len(self.inputs.get_shape()) == 3: if is_scale: @@ -603,7 +604,7 @@ class DownSampling2dLayer(Layer): def __init__( self, prev_layer, - size=list(), + size, is_scale=True, method=0, align_corners=False, @@ -614,7 +615,8 @@ def __init__( self.inputs = prev_layer.outputs - assert isinstance(size, (list, tuple)) and len(size) == 2 + if not (isinstance(size, (list, tuple)) and len(size) == 2): + raise AssertionError() if len(self.inputs.get_shape()) == 3: if is_scale: diff --git a/tensorlayer/layers/object_detection.py index e9c8b7158..2fb1a2ccf 100644 --- a/tensorlayer/layers/object_detection.py +++ b/tensorlayer/layers/object_detection.py @@ -38,7 +38,7 @@ class ROIPoolingLayer(Layer): def __init__( self, prev_layer, - rois=list(), + rois, pool_height=2, pool_width=2, name='roipooling_layer', diff --git a/tensorlayer/layers/padding.py index c85bc98ee..f537f4b13 100644 --- a/tensorlayer/layers/padding.py +++ b/tensorlayer/layers/padding.py @@ -85,7 +85,8 @@ def __init__( self.inputs = prev_layer.outputs - assert isinstance(padding, (int, tuple, dict)) + if not isinstance(padding, (int, tuple, dict)): + raise AssertionError() self.outputs = tf.keras.layers.ZeroPadding1D(padding=padding, name=name)(self.inputs) self.all_layers.append(self.outputs) @@ -120,7 +121,8 @@ def __init__( self.inputs = prev_layer.outputs - assert isinstance(padding, (int, tuple)) + if not isinstance(padding, (int, tuple)): + raise AssertionError() self.outputs = tf.keras.layers.ZeroPadding2D(padding=padding, name=name)(self.inputs) self.all_layers.append(self.outputs) @@ -153,7 +155,9 @@ def __init__(
logging.info("ZeroPad3d %s: padding:%s" % (name, str(padding))) self.inputs = prev_layer.outputs - assert isinstance(padding, (int, tuple)) + + if not isinstance(padding, (int, tuple)): + raise AssertionError() self.outputs = tf.keras.layers.ZeroPadding3D(padding=padding, name=name)(self.inputs) self.all_layers.append(self.outputs) diff --git a/tensorlayer/layers/shape.py b/tensorlayer/layers/shape.py index aab4029de..42b782e72 100644 --- a/tensorlayer/layers/shape.py +++ b/tensorlayer/layers/shape.py @@ -72,7 +72,7 @@ class ReshapeLayer(Layer): """ @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release - def __init__(self, prev_layer, shape=list(), name='reshape'): + def __init__(self, prev_layer, shape, name='reshape'): super(ReshapeLayer, self).__init__(prev_layer=prev_layer, name=name) self.inputs = prev_layer.outputs diff --git a/tensorlayer/layers/spatial_transformer.py b/tensorlayer/layers/spatial_transformer.py index 512f5ab60..3ab4fb3f4 100644 --- a/tensorlayer/layers/spatial_transformer.py +++ b/tensorlayer/layers/spatial_transformer.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- -import warnings - import numpy as np import tensorflow as tf from six.moves import xrange From 6f05a43c2624cee0d52571b7e983e4943df18112 Mon Sep 17 00:00:00 2001 From: DEKHTIARJonathan Date: Fri, 13 Apr 2018 10:20:57 +0200 Subject: [PATCH 21/21] Unnecessary PR changes removed - PR Cleaned & Refactored --- tensorlayer/layers/convolution.py | 2 +- tensorlayer/layers/core.py | 2 +- tensorlayer/layers/extend.py | 2 +- tensorlayer/layers/importer.py | 4 ++-- tensorlayer/layers/padding.py | 6 +++--- tensorlayer/layers/recurrent.py | 8 ++++---- tensorlayer/layers/shape.py | 2 +- 7 files changed, 13 insertions(+), 13 deletions(-) diff --git a/tensorlayer/layers/convolution.py b/tensorlayer/layers/convolution.py index 5eecc1b6c..162a2c22b 100644 --- a/tensorlayer/layers/convolution.py +++ b/tensorlayer/layers/convolution.py @@ -1134,7 +1134,7 @@ class _SeparableConv2dLayer(Layer): # TODO @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__(self, prev_layer, - n_filter=16, + n_filter, filter_size=5, strides=(1, 1), padding='valid', diff --git a/tensorlayer/layers/core.py b/tensorlayer/layers/core.py index 01d740f96..799a9d4bd 100644 --- a/tensorlayer/layers/core.py +++ b/tensorlayer/layers/core.py @@ -381,7 +381,7 @@ class Layer(object): ... 
Tensor("d2/Identity:0", shape=(?, 80), dtype=float32) """ - + # Added to allow auto-completion inputs = None outputs = None all_layers = [] diff --git a/tensorlayer/layers/extend.py b/tensorlayer/layers/extend.py index 3a078f2c0..8c3fa52b1 100644 --- a/tensorlayer/layers/extend.py +++ b/tensorlayer/layers/extend.py @@ -39,7 +39,7 @@ class ExpandDimsLayer(Layer): def __init__( self, prev_layer, - axis=0, + axis, name='expand_dims', ): super(ExpandDimsLayer, self).__init__(prev_layer=prev_layer, name=name) diff --git a/tensorlayer/layers/importer.py b/tensorlayer/layers/importer.py index f85bb17b9..3951b01ed 100644 --- a/tensorlayer/layers/importer.py +++ b/tensorlayer/layers/importer.py @@ -59,7 +59,7 @@ class LambdaLayer(Layer): def __init__( self, prev_layer, - fn=None, + fn, fn_args=None, name='lambda_layer', ): @@ -228,7 +228,7 @@ class EstimatorLayer(Layer): def __init__( self, prev_layer, - model_fn=None, + model_fn, args=None, name='estimator_layer', ): diff --git a/tensorlayer/layers/padding.py b/tensorlayer/layers/padding.py index f537f4b13..716f9da10 100644 --- a/tensorlayer/layers/padding.py +++ b/tensorlayer/layers/padding.py @@ -77,7 +77,7 @@ class ZeroPad1d(Layer): def __init__( self, prev_layer, - padding=1, + padding, name='zeropad1d', ): super(ZeroPad1d, self).__init__(prev_layer=prev_layer, name=name) @@ -113,7 +113,7 @@ class ZeroPad2d(Layer): def __init__( self, prev_layer, - padding=1, + padding, name='zeropad2d', ): super(ZeroPad2d, self).__init__(prev_layer=prev_layer, name=name) @@ -148,7 +148,7 @@ class ZeroPad3d(Layer): def __init__( self, prev_layer, - padding=1, + padding, name='zeropad3d', ): super(ZeroPad3d, self).__init__(prev_layer=prev_layer, name=name) diff --git a/tensorlayer/layers/recurrent.py b/tensorlayer/layers/recurrent.py index 0134de72c..09affd4c7 100644 --- a/tensorlayer/layers/recurrent.py +++ b/tensorlayer/layers/recurrent.py @@ -136,7 +136,7 @@ class RNNLayer(Layer): def __init__( self, prev_layer, - cell_fn=None, + cell_fn, cell_init_args=None, n_hidden=100, initializer=tf.random_uniform_initializer(-0.1, 0.1), @@ -324,7 +324,7 @@ class BiRNNLayer(Layer): def __init__( self, prev_layer, - cell_fn=None, + cell_fn, cell_init_args=None, n_hidden=100, initializer=tf.random_uniform_initializer(-0.1, 0.1), @@ -1027,7 +1027,7 @@ class DynamicRNNLayer(Layer): def __init__( self, prev_layer, - cell_fn=None, #tf.nn.rnn_cell.LSTMCell, + cell_fn, #tf.nn.rnn_cell.LSTMCell, cell_init_args=None, n_hidden=256, initializer=tf.random_uniform_initializer(-0.1, 0.1), @@ -1282,7 +1282,7 @@ class BiDynamicRNNLayer(Layer): def __init__( self, prev_layer, - cell_fn=None, #tf.nn.rnn_cell.LSTMCell, + cell_fn, #tf.nn.rnn_cell.LSTMCell, cell_init_args=None, n_hidden=256, initializer=tf.random_uniform_initializer(-0.1, 0.1), diff --git a/tensorlayer/layers/shape.py b/tensorlayer/layers/shape.py index 42b782e72..d712acbbb 100644 --- a/tensorlayer/layers/shape.py +++ b/tensorlayer/layers/shape.py @@ -110,7 +110,7 @@ class TransposeLayer(Layer): """ @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release - def __init__(self, prev_layer, perm=None, name='transpose'): + def __init__(self, prev_layer, perm, name='transpose'): super(TransposeLayer, self).__init__(prev_layer=prev_layer, name=name) logging.info("TransposeLayer %s: perm:%s" % (name, perm))