diff --git a/.gitignore b/.gitignore index 6da63706f..2e36fa083 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,4 @@ dist docs/_build tensorlayer.egg-info tensorlayer/__pacache__ +venv/ \ No newline at end of file diff --git a/tensorlayer/__init__.py b/tensorlayer/__init__.py index bb552423f..42a2653a7 100644 --- a/tensorlayer/__init__.py +++ b/tensorlayer/__init__.py @@ -2,9 +2,9 @@ from __future__ import absolute_import try: - install_instr = "Please make sure you install a recent enough version of TensorFlow." import tensorflow except ImportError: + install_instr = "Please make sure you install a recent enough version of TensorFlow." raise ImportError("__init__.py : Could not import TensorFlow." + install_instr) from . import activation diff --git a/tensorlayer/deprecation.py b/tensorlayer/deprecation.py new file mode 100644 index 000000000..61a153db6 --- /dev/null +++ b/tensorlayer/deprecation.py @@ -0,0 +1,42 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +import functools +import warnings + +from . import _logging as logging + + +def deprecated_alias(end_support_version, **aliases): + def deco(f): + @functools.wraps(f) + def wrapper(*args, **kwargs): + + try: + func_name = "{}.{}".format(args[0].__class__.__name__, f.__name__) + except (NameError, IndexError): + func_name = f.__name__ + + rename_kwargs(kwargs, aliases, end_support_version, func_name) + + return f(*args, **kwargs) + + return wrapper + + return deco + + +def rename_kwargs(kwargs, aliases, end_support_version, func_name): + + for alias, new in aliases.items(): + + if alias in kwargs: + + if new in kwargs: + raise TypeError('{}() received both {} and {}'.format(func_name, alias, new)) + + warnings.warn('{}() - {} is deprecated; use {}'.format(func_name, alias, new), DeprecationWarning) + logging.warning("DeprecationWarning: {}(): " + "`{}` argument is deprecated and will be removed in version {}, " + "please change for `{}.`".format(func_name, alias, end_support_version, new)) + kwargs[new] = kwargs.pop(alias) diff --git a/tensorlayer/layers/binary.py b/tensorlayer/layers/binary.py index 9e1af031c..4987fe767 100644 --- a/tensorlayer/layers/binary.py +++ b/tensorlayer/layers/binary.py @@ -4,6 +4,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'BinaryDenseLayer', 'BinaryConv2d', @@ -124,6 +126,7 @@ class BinaryDenseLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -136,13 +139,16 @@ def __init__( b_init_args=None, name='binary_dense', ): + super(BinaryDenseLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("BinaryDenseLayer %s: %d %s" % (name, n_units, act.__name__)) + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs if self.inputs.get_shape().ndims != 2: raise Exception("The input dimension must be rank 2, please reshape or flatten it") @@ -151,7 +157,7 @@ def __init__( n_in = int(self.inputs.get_shape()[-1]) self.n_units = n_units - logging.info("BinaryDenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__)) + with tf.variable_scope(name): W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) # W = tl.act.sign(W) # dont update ... 
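The new `tensorlayer/deprecation.py` module above drives every `layer` → `prev_layer` rename in this patch. Below is a minimal sketch of the decorator's call-time behaviour; the `DemoLayer` class is hypothetical and exists only for illustration, and the snippet assumes a TensorLayer build that already contains this file:

```python
import warnings

from tensorlayer.deprecation import deprecated_alias


class DemoLayer(object):
    # Hypothetical stand-in for a real layer class; the patch decorates each
    # layer's __init__ in exactly this way.
    @deprecated_alias(layer='prev_layer', end_support_version=1.9)
    def __init__(self, prev_layer, name='demo'):
        self.prev_layer = prev_layer
        self.name = name


warnings.simplefilter('always', DeprecationWarning)

# The old keyword is remapped to `prev_layer` and triggers both a
# DeprecationWarning and a logging message.
net = DemoLayer(layer='previous_net', name='demo')
print(net.prev_layer)  # -> 'previous_net'

# Passing the old and the new keyword together raises TypeError.
try:
    DemoLayer(layer='a', prev_layer='b')
except TypeError as err:
    print(err)  # DemoLayer.__init__() received both layer and prev_layer
```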
@@ -228,6 +234,7 @@ class BinaryConv2d(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -255,20 +262,20 @@ def __init__( # data_format=None, name='binary_cnn2d', ): + super(BinaryConv2d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("BinaryConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (name, n_filter, str(filter_size), str(strides), padding, + act.__name__)) + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - - if use_gemm: - raise Exception("TODO. The current version use tf.matmul for inferencing.") - - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs if act is None: act = tf.identity - logging.info("BinaryConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (self.name, n_filter, str(filter_size), str(strides), padding, - act.__name__)) + if use_gemm: + raise Exception("TODO. The current version use tf.matmul for inferencing.") if len(strides) != 2: raise ValueError("len(strides) should be 2.") @@ -324,6 +331,7 @@ class TernaryDenseLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -336,22 +344,24 @@ def __init__( b_init_args=None, name='ternary_dense', ): + super(TernaryDenseLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("TernaryDenseLayer %s: %d %s" % (name, n_units, act.__name__)) + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs if self.inputs.get_shape().ndims != 2: raise Exception("The input dimension must be rank 2, please reshape or flatten it") - if use_gemm: raise Exception("TODO. The current version use tf.matmul for inferencing.") n_in = int(self.inputs.get_shape()[-1]) self.n_units = n_units - logging.info("TernaryDenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__)) + with tf.variable_scope(name): W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) # W = tl.act.sign(W) # dont update ... @@ -430,6 +440,7 @@ class TernaryConv2d(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -457,20 +468,18 @@ def __init__( # data_format=None, name='ternary_cnn2d', ): + super(TernaryConv2d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("TernaryConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (name, n_filter, str(filter_size), str(strides), padding, + act.__name__)) + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - - if use_gemm: - raise Exception("TODO. The current version use tf.matmul for inferencing.") - - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs if act is None: act = tf.identity - logging.info("TernaryConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (self.name, n_filter, str(filter_size), str(strides), padding, - act.__name__)) + if use_gemm: + raise Exception("TODO. 
The current version use tf.matmul for inferencing.") if len(strides) != 2: raise ValueError("len(strides) should be 2.") @@ -508,7 +517,7 @@ class DorefaDenseLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. bitW : int The bits of this layer's parameter @@ -533,6 +542,7 @@ class DorefaDenseLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -547,22 +557,24 @@ def __init__( b_init_args=None, name='dorefa_dense', ): + super(DorefaDenseLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("DorefaDenseLayer %s: %d %s" % (name, n_units, act.__name__)) + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs if self.inputs.get_shape().ndims != 2: raise Exception("The input dimension must be rank 2, please reshape or flatten it") - if use_gemm: raise Exception("TODO. The current version use tf.matmul for inferencing.") n_in = int(self.inputs.get_shape()[-1]) self.n_units = n_units - logging.info("DorefaDenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__)) + with tf.variable_scope(name): W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) # W = tl.act.sign(W) # dont update ... @@ -596,7 +608,7 @@ class DorefaConv2d(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. bitW : int The bits of this layer's parameter @@ -644,6 +656,7 @@ class DorefaConv2d(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -673,21 +686,22 @@ def __init__( # data_format=None, name='dorefa_cnn2d', ): + super(DorefaConv2d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("DorefaConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (name, n_filter, str(filter_size), str(strides), padding, + act.__name__)) + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} + if act is None: + act = tf.identity if use_gemm: raise Exception("TODO. The current version use tf.matmul for inferencing.") - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs - if act is None: - act = tf.identity - logging.info("DorefaConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (self.name, n_filter, str(filter_size), str(strides), padding, - act.__name__)) - if len(strides) != 2: raise ValueError("len(strides) should be 2.") try: @@ -720,23 +734,25 @@ class SignLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. name : a str A unique layer name. 
""" + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, name='sign', ): + super(SignLayer, self).__init__(prev_layer=prev_layer, name=name) - Layer.__init__(self, prev_layer=prev_layer, name=name) self.inputs = prev_layer.outputs logging.info("SignLayer %s" % (self.name)) + with tf.variable_scope(name): # self.outputs = tl.act.sign(self.inputs) self.outputs = quantize(self.inputs) @@ -749,7 +765,7 @@ class ScaleLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. init_scale : float The initial value for the scale factor. @@ -758,17 +774,18 @@ class ScaleLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, init_scale=0.05, name='scale', ): + super(ScaleLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("ScaleLayer %s: init_scale: %f" % (name, init_scale)) - Layer.__init__(self, prev_layer=prev_layer, name=name) self.inputs = prev_layer.outputs - logging.info("ScaleLayer %s: init_scale: %f" % (self.name, init_scale)) with tf.variable_scope(name): # scale = tf.get_variable(name='scale_factor', init, trainable=True, ) scale = tf.get_variable("scale", shape=[1], initializer=tf.constant_initializer(value=init_scale)) diff --git a/tensorlayer/layers/convolution.py b/tensorlayer/layers/convolution.py index a49a93ade..162a2c22b 100644 --- a/tensorlayer/layers/convolution.py +++ b/tensorlayer/layers/convolution.py @@ -5,6 +5,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'Conv1dLayer', 'Conv2dLayer', @@ -33,7 +35,7 @@ class Conv1dLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. act : activation function The activation function of this layer. @@ -60,6 +62,7 @@ class Conv1dLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -75,6 +78,11 @@ def __init__( b_init_args=None, name='cnn1d', ): + super(Conv1dLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("Conv1dLayer %s: shape:%s stride:%s pad:%s act:%s" % (name, str(shape), str(stride), padding, act.__name__)) + + self.inputs = prev_layer.outputs + if act is None: act = tf.identity if W_init_args is None: @@ -82,10 +90,6 @@ def __init__( if b_init_args is None: b_init_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs - logging.info("Conv1dLayer %s: shape:%s stride:%s pad:%s act:%s" % (self.name, str(shape), str(stride), padding, act.__name__)) - with tf.variable_scope(name): W = tf.get_variable(name='W_conv1d', shape=shape, initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) self.outputs = tf.nn.convolution( @@ -112,7 +116,7 @@ class Conv2dLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. act : activation function The activation function of this layer. 
@@ -174,6 +178,7 @@ class Conv2dLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -189,16 +194,17 @@ def __init__( data_format=None, name='cnn_layer', ): + super(Conv2dLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("Conv2dLayer %s: shape:%s strides:%s pad:%s act:%s" % (name, str(shape), str(strides), padding, act.__name__)) + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs if act is None: act = tf.identity - logging.info("Conv2dLayer %s: shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(strides), padding, act.__name__)) with tf.variable_scope(name): W = tf.get_variable(name='W_conv2d', shape=shape, initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) @@ -223,7 +229,7 @@ class DeConv2dLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. act : activation function The activation function of this layer. @@ -293,6 +299,7 @@ class DeConv2dLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -307,17 +314,19 @@ def __init__( b_init_args=None, name='decnn2d_layer', ): + super(DeConv2dLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("DeConv2dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % (name, str(shape), str(output_shape), str(strides), padding, + act.__name__)) + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs if act is None: act = tf.identity - logging.info("DeConv2dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(output_shape), str(strides), padding, - act.__name__)) + # logging.info(" DeConv2dLayer: Untested") with tf.variable_scope(name): W = tf.get_variable(name='W_deconv2d', shape=shape, initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) @@ -343,7 +352,7 @@ class Conv3dLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. act : activation function The activation function of this layer. @@ -373,6 +382,7 @@ class Conv3dLayer(Layer): ... 
[None, 50, 50, 50, 32] """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -386,16 +396,17 @@ def __init__( b_init_args=None, name='cnn3d_layer', ): + super(Conv3dLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("Conv3dLayer %s: shape:%s strides:%s pad:%s act:%s" % (name, str(shape), str(strides), padding, act.__name__)) + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs if act is None: act = tf.identity - logging.info("Conv3dLayer %s: shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(strides), padding, act.__name__)) with tf.variable_scope(name): # W = tf.Variable(W_init(shape=shape, **W_init_args), name='W_conv') @@ -424,7 +435,7 @@ class DeConv3dLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. act : activation function The activation function of this layer. @@ -450,6 +461,7 @@ class DeConv3dLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -464,17 +476,18 @@ def __init__( b_init_args=None, name='decnn3d_layer', ): + super(DeConv3dLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("DeConv3dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % (name, str(shape), str(output_shape), str(strides), padding, + act.__name__)) + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs if act is None: act = tf.identity - logging.info("DeConv3dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(output_shape), str(strides), padding, - act.__name__)) with tf.variable_scope(name): W = tf.get_variable(name='W_deconv3d', shape=shape, initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) @@ -499,7 +512,7 @@ class UpSampling2dLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer with 4-D Tensor of the shape (batch, height, width, channels) or 3-D Tensor of the shape (height, width, channels). size : tuple of int/float (height, width) scale factor or new size of height and width. 
@@ -518,6 +531,7 @@ class UpSampling2dLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -527,21 +541,29 @@ def __init__( align_corners=False, name='upsample2d_layer', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + super(UpSampling2dLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("UpSampling2dLayer %s: is_scale:%s size:%s method:%d align_corners:%s" % (name, is_scale, size, method, align_corners)) + self.inputs = prev_layer.outputs + + if not (isinstance(size, (list, tuple)) and len(size) == 2): + raise AssertionError() + if len(self.inputs.get_shape()) == 3: if is_scale: size_h = size[0] * int(self.inputs.get_shape()[0]) size_w = size[1] * int(self.inputs.get_shape()[1]) size = [int(size_h), int(size_w)] + elif len(self.inputs.get_shape()) == 4: if is_scale: size_h = size[0] * int(self.inputs.get_shape()[1]) size_w = size[1] * int(self.inputs.get_shape()[2]) size = [int(size_h), int(size_w)] + else: raise Exception("Donot support shape %s" % self.inputs.get_shape()) - logging.info("UpSampling2dLayer %s: is_scale:%s size:%s method:%d align_corners:%s" % (name, is_scale, size, method, align_corners)) + with tf.variable_scope(name): try: self.outputs = tf.image.resize_images(self.inputs, size=size, method=method, align_corners=align_corners) @@ -559,7 +581,7 @@ class DownSampling2dLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer with 4-D Tensor in the shape of (batch, height, width, channels) or 3-D Tensor in the shape of (height, width, channels). size : tuple of int/float (height, width) scale factor or new size of height and width. @@ -578,6 +600,7 @@ class DownSampling2dLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -587,8 +610,14 @@ def __init__( align_corners=False, name='downsample2d_layer', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + super(DownSampling2dLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("DownSampling2dLayer %s: is_scale:%s size:%s method:%d, align_corners:%s" % (name, is_scale, size, method, align_corners)) + self.inputs = prev_layer.outputs + + if not (isinstance(size, (list, tuple)) and len(size) == 2): + raise AssertionError() + if len(self.inputs.get_shape()) == 3: if is_scale: size_h = size[0] * int(self.inputs.get_shape()[0]) @@ -600,8 +629,8 @@ def __init__( size_w = size[1] * int(self.inputs.get_shape()[2]) size = [int(size_h), int(size_w)] else: - raise Exception("Donot support shape %s" % self.inputs.get_shape()) - logging.info("DownSampling2dLayer %s: is_scale:%s size:%s method:%d, align_corners:%s" % (name, is_scale, size, method, align_corners)) + raise Exception("Do not support shape %s" % self.inputs.get_shape()) + with tf.variable_scope(name): try: self.outputs = tf.image.resize_images(self.inputs, size=size, method=method, align_corners=align_corners) @@ -620,7 +649,7 @@ class DeformableConv2d(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. offset_layer : :class:`Layer` To predict the offset of convolution operations.
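For the resampling layers just refactored, a short usage sketch may help clarify the `is_scale` semantics (illustrative only; it assumes TF 1.x graph mode and a TensorLayer build carrying this patch, and the tensor shapes are made up):

```python
import tensorflow as tf
import tensorlayer as tl

x = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
net = tl.layers.InputLayer(x, name='in')

# With is_scale=True, `size` is a (height, width) scale factor: the 32x32
# feature map is resized to 64x64. With is_scale=False, `size` is taken as
# the target pixel size directly.
net = tl.layers.UpSampling2dLayer(prev_layer=net, size=(2, 2), is_scale=True, name='up2d')
print(net.outputs)  # Tensor of shape (?, 64, 64, 3)
```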
@@ -662,6 +691,7 @@ class DeformableConv2d(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -675,6 +705,7 @@ def __init__( b_init=tf.constant_initializer(value=0.0), W_init_args=None, b_init_args=None): + if tf.__version__ < "1.4": raise Exception("Deformable CNN layer requires tensrflow 1.4 or higher version | current version %s" % tf.__version__) @@ -812,12 +843,14 @@ def _tf_batch_map_offsets(inputs, offsets, grid_offset): return mapped_vals - Layer.__init__(self, prev_layer=[prev_layer, offset_layer], name=name) + super(DeformableConv2d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("DeformableConv2d %s: n_filter: %d, filter_size: %s act:%s" % (name, n_filter, str(filter_size), act.__name__)) + self.inputs = prev_layer.outputs + self.offset_layer = offset_layer if act is None: act = tf.identity - logging.info("DeformableConv2d %s: n_filter: %d, filter_size: %s act:%s" % (self.name, n_filter, str(filter_size), act.__name__)) try: pre_channel = int(prev_layer.outputs.get_shape()[-1]) @@ -891,8 +924,9 @@ def _tf_batch_map_offsets(inputs, offsets, grid_offset): self.all_params.append(W) +@deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def atrous_conv1d( - layer, + prev_layer, n_filter=32, filter_size=2, stride=1, @@ -910,7 +944,7 @@ def atrous_conv1d( Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. n_filter : int The number of filters. @@ -950,9 +984,9 @@ def atrous_conv1d( b_init_args = {} return Conv1dLayer( - prev_layer=layer, + prev_layer=prev_layer, act=act, - shape=(filter_size, int(layer.outputs.get_shape()[-1]), n_filter), + shape=(filter_size, int(prev_layer.outputs.get_shape()[-1]), n_filter), stride=stride, padding=padding, dilation_rate=dilation, @@ -971,7 +1005,7 @@ class AtrousConv2dLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer with a 4D output tensor in the shape of (batch, height, width, channels). n_filter : int The number of filters. 
@@ -998,6 +1032,7 @@ class AtrousConv2dLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__(self, prev_layer, n_filter=32, @@ -1010,16 +1045,19 @@ def __init__(self, W_init_args=None, b_init_args=None, name='atrou2d'): + + super(AtrousConv2dLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("AtrousConv2dLayer %s: n_filter:%d filter_size:%s rate:%d pad:%s act:%s" % (name, n_filter, filter_size, rate, padding, act.__name__)) + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs if act is None: act = tf.identity - logging.info("AtrousConv2dLayer %s: n_filter:%d filter_size:%s rate:%d pad:%s act:%s" % (self.name, n_filter, filter_size, rate, padding, act.__name__)) + with tf.variable_scope(name): shape = [filter_size[0], filter_size[1], int(self.inputs.get_shape()[-1]), n_filter] filters = tf.get_variable(name='filter', shape=shape, initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) @@ -1046,7 +1084,7 @@ class _SeparableConv2dLayer(Layer): # TODO Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer with a 4D output tensor in the shape of [batch, height, width, channels]. n_filter : int The number of filters. @@ -1093,6 +1131,7 @@ class _SeparableConv2dLayer(Layer): # TODO """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__(self, prev_layer, n_filter, @@ -1112,16 +1151,18 @@ def __init__(self, bias_regularizer=None, activity_regularizer=None, name='atrou2d'): - Layer.__init__(self, prev_layer=prev_layer, name=name) + + super(_SeparableConv2dLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("SeparableConv2dLayer %s: n_filter:%d filter_size:%s strides:%s padding:%s dilation_rate:%s depth_multiplier:%s act:%s" % + (name, n_filter, filter_size, str(strides), padding, str(dilation_rate), str(depth_multiplier), act.__name__)) + self.inputs = prev_layer.outputs + if tf.__version__ > "0.12.1": raise Exception("This layer only supports for TF 1.0+") bias_initializer = bias_initializer() - logging.info("SeparableConv2dLayer %s: n_filter:%d filter_size:%s strides:%s padding:%s dilation_rate:%s depth_multiplier:%s act:%s" % - (self.name, n_filter, filter_size, str(strides), padding, str(dilation_rate), str(depth_multiplier), act.__name__)) - with tf.variable_scope(name) as vs: self.outputs = tf.layers.separable_conv2d( self.inputs, @@ -1219,8 +1260,9 @@ def deconv2d_bilinear_upsampling_initializer(shape): return bilinear_weights_init +@deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def conv1d( - layer, + prev_layer, n_filter=32, filter_size=5, stride=1, @@ -1238,7 +1280,7 @@ def conv1d( Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer n_filter : int The number of filters @@ -1295,9 +1337,9 @@ def conv1d( b_init_args = {} return Conv1dLayer( - prev_layer=layer, + prev_layer=prev_layer, act=act, - shape=(filter_size, int(layer.outputs.get_shape()[-1]), n_filter), + shape=(filter_size, int(prev_layer.outputs.get_shape()[-1]), n_filter), stride=stride, dilation_rate=dilation_rate, padding=padding, @@ -1313,13 +1355,12 @@ def conv1d( # TODO: DeConv1d -# def conv2d( class Conv2d(Layer): """Simplified 
version of :class:`Conv2dLayer`. Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. n_filter : int The number of filters. @@ -1365,9 +1406,10 @@ class Conv2d(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - layer, + prev_layer, n_filter=32, filter_size=(3, 3), strides=(1, 1), @@ -1408,14 +1450,14 @@ def __init__( # data_format=data_format, # name=name) + super(Conv2d, self).__init__(prev_layer=prev_layer, name=name) + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - - Layer.__init__(self, prev_layer=layer, name=name) - self.inputs = layer.outputs - if act is None: act = tf.identity @@ -1452,7 +1494,7 @@ def __init__( if len(strides) != 2: raise ValueError("len(strides) should be 2, Conv2d and Conv2dLayer are different.") try: - pre_channel = int(layer.outputs.get_shape()[-1]) + pre_channel = int(prev_layer.outputs.get_shape()[-1]) except Exception: # if pre_channel is ?, it happens when using Spatial Transformer Net pre_channel = 1 logging.info("[warnings] unknow input channels, set to 1") @@ -1478,8 +1520,9 @@ def __init__( self.all_params.append(W) -def deconv2d(layer, - n_filter=32, +@deprecated_alias(layer='prev_layer', n_out_channel='n_filter', end_support_version=1.9) # TODO remove this line for the 1.9 release +def deconv2d(prev_layer, + n_filter, filter_size=(3, 3), out_size=(30, 30), strides=(2, 2), @@ -1495,7 +1538,7 @@ def deconv2d(layer, Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. n_filter : int The number of filters. @@ -1529,23 +1572,27 @@ def deconv2d(layer, A :class:`DeConv2dLayer` object. 
""" + + logging.info("DeConv2d %s: n_filters:%s strides:%s pad:%s act:%s" % (name, str(n_filter), str(strides), padding, act.__name__)) + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} if act is None: act = tf.identity + if len(strides) != 2: raise ValueError("len(strides) should be 2, DeConv2d and DeConv2dLayer are different.") + if tf.__version__ > '1.3': - logging.info("DeConv2d %s: n_filters:%s strides:%s pad:%s act:%s" % (name, str(n_filter), str(strides), padding, act.__name__)) - inputs = layer.outputs + inputs = prev_layer.outputs scope_name = tf.get_variable_scope().name # if scope_name: # whole_name = scope_name + '/' + name # else: # whole_name = name - net_new = Layer(name=name) #whole_name) + net_new = Layer(prev_layer=None, name=name) # with tf.name_scope(name): with tf.variable_scope(name) as vs: net_new.outputs = tf.contrib.layers.conv2d_transpose( @@ -1559,25 +1606,25 @@ def deconv2d(layer, biases_initializer=b_init, scope=name) new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) - net_new.all_layers = list(layer.all_layers) - net_new.all_params = list(layer.all_params) - net_new.all_drop = dict(layer.all_drop) + net_new.all_layers = list(prev_layer.all_layers) + net_new.all_params = list(prev_layer.all_params) + net_new.all_drop = dict(prev_layer.all_drop) net_new.all_layers.extend([net_new.outputs]) net_new.all_params.extend(new_variables) return net_new else: if batch_size is None: # batch_size = tf.shape(net.outputs)[0] - fixed_batch_size = layer.outputs.get_shape().with_rank_at_least(1)[0] + fixed_batch_size = prev_layer.outputs.get_shape().with_rank_at_least(1)[0] if fixed_batch_size.value: batch_size = fixed_batch_size.value else: from tensorflow.python.ops import array_ops - batch_size = array_ops.shape(layer.outputs)[0] + batch_size = array_ops.shape(prev_layer.outputs)[0] return DeConv2dLayer( - prev_layer=layer, + prev_layer=prev_layer, act=act, - shape=(filter_size[0], filter_size[1], n_filter, int(layer.outputs.get_shape()[-1])), + shape=(filter_size[0], filter_size[1], n_filter, int(prev_layer.outputs.get_shape()[-1])), output_shape=(batch_size, int(out_size[0]), int(out_size[1]), n_filter), strides=(1, strides[0], strides[1], 1), padding=padding, @@ -1593,7 +1640,7 @@ class DeConv3d(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. n_filter : int The number of filters. @@ -1614,6 +1661,7 @@ class DeConv3d(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__(self, prev_layer, n_filter=32, @@ -1624,10 +1672,12 @@ def __init__(self, W_init=tf.truncated_normal_initializer(stddev=0.02), b_init=tf.constant_initializer(value=0.0), name='decnn3d'): - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs + + super(DeConv3d, self).__init__(prev_layer=prev_layer, name=name) logging.info("DeConv3d %s: n_filters:%s strides:%s pad:%s act:%s" % (name, str(n_filter), str(strides), padding, act.__name__)) + self.inputs = prev_layer.outputs + with tf.variable_scope(name) as vs: self.outputs = tf.contrib.layers.conv3d_transpose( inputs=self.inputs, @@ -1659,7 +1709,7 @@ class DepthwiseConv2d(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. filter_size : tuple of int The filter size (height, width). 
@@ -1707,6 +1757,7 @@ class DepthwiseConv2d(Layer): """ # # https://zhuanlan.zhihu.com/p/31551004 https://github.com/xiaohu2015/DeepLearning_tutorials/blob/master/CNNs/MobileNet.py + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -1722,23 +1773,23 @@ def __init__( b_init_args=None, name='depthwise_conv2d', ): + super(DepthwiseConv2d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("DepthwiseConv2d %s: shape:%s strides:%s pad:%s act:%s" % (name, str(shape), str(strides), padding, act.__name__)) + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs - if act is None: act = tf.identity - logging.info("DepthwiseConv2d %s: shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(strides), padding, act.__name__)) try: pre_channel = int(prev_layer.outputs.get_shape()[-1]) except Exception: # if pre_channel is ?, it happens when using Spatial Transformer Net pre_channel = 1 - logging.info("[warnings] unknow input channels, set to 1") + logging.info("[warnings] unknown input channels, set to 1") shape = [shape[0], shape[1], pre_channel, depth_multiplier] @@ -1776,7 +1827,7 @@ class SeparableConv2d(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. n_filter : int The dimensionality of the output space (i.e. the number of filters in the convolution). @@ -1803,6 +1854,7 @@ class SeparableConv2d(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -1836,11 +1888,12 @@ def __init__( # if b_init_args is None: # b_init_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) + super(SeparableConv2d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("SeparableConv2d %s: n_filter:%d filter_size:%s filter_size:%s depth_multiplier:%d act:%s" % (self.name, n_filter, str(filter_size), + str(strides), depth_multiplier, + act.__name__)) + self.inputs = prev_layer.outputs - # print(self.name, n_filter, str(filter_size), str(strides), depth_multiplier, act.__name__) - logging.info("SeparableConv2d %s: n_filter:%d filter_size:%s filter_size:%s depth_multiplier:%d act:%s" \ - % (self.name, n_filter, str(filter_size), str(strides), depth_multiplier, act.__name__)) with tf.variable_scope(name) as vs: self.outputs = tf.layers.separable_conv2d( @@ -1877,7 +1930,7 @@ class GroupConv2d(Layer): Parameters -------------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. n_filter : int The number of filters. @@ -1903,9 +1956,10 @@ class GroupConv2d(Layer): A unique layer name. 
""" + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, + prev_layer, n_filter=32, filter_size=(3, 3), strides=(2, 2), @@ -1918,18 +1972,21 @@ def __init__( b_init_args=None, name='groupconv', ): # Windaway + + super(GroupConv2d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("GroupConv2d %s: n_filter:%d size:%s strides:%s n_group:%d pad:%s act:%s" % (name, n_filter, str(filter_size), str(strides), n_group, + padding, act.__name__)) + + self.inputs = prev_layer.outputs + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs groupConv = lambda i, k: tf.nn.conv2d(i, k, strides=[1, strides[0], strides[1], 1], padding=padding) channels = int(self.inputs.get_shape()[-1]) - logging.info("GroupConv2d %s: n_filter:%d size:%s strides:%s n_group:%d pad:%s act:%s" % (self.name, n_filter, str(filter_size), str(strides), n_group, - padding, act.__name__)) with tf.variable_scope(name): We = tf.get_variable( name='W', diff --git a/tensorlayer/layers/core.py b/tensorlayer/layers/core.py index 7c28b91b4..799a9d4bd 100644 --- a/tensorlayer/layers/core.py +++ b/tensorlayer/layers/core.py @@ -9,6 +9,8 @@ from .. import _logging as logging from .. import files, iterate, utils, visualize +from ..deprecation import deprecated_alias + __all__ = [ 'LayersConfig', 'TF_GRAPHKEYS_VARIABLES', @@ -379,8 +381,16 @@ class Layer(object): ... Tensor("d2/Identity:0", shape=(?, 80), dtype=float32) """ + # Added to allow auto-completion + inputs = None + outputs = None + all_layers = [] + all_params = [] + all_drop = {} + + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, name=None): - def __init__(self, prev_layer=None, name=None): if name is None: raise ValueError('Layer must have a name.') @@ -396,12 +406,15 @@ def __init__(self, prev_layer=None, name=None): self.all_layers = list(prev_layer.all_layers) self.all_params = list(prev_layer.all_params) self.all_drop = dict(prev_layer.all_drop) + elif isinstance(prev_layer, list): # 2. for layer have multiply inputs i.e. 
ConcatLayer self.all_layers = list_remove_repeat(sum([l.all_layers for l in prev_layer], [])) self.all_params = list_remove_repeat(sum([l.all_params for l in prev_layer], [])) self.all_drop = dict(sum([list(l.all_drop.items()) for l in prev_layer], [])) + elif isinstance(prev_layer, tf.Tensor): raise Exception("Please use InputLayer to convert Tensor/Placeholder to TL layer") + elif prev_layer is not None: # tl.models self.all_layers = list(prev_layer.all_layers) self.all_params = list(prev_layer.all_params) @@ -450,7 +463,7 @@ def __str__(self): return " Last layer is: %s (%s) %s" % (self.__class__.__name__, self.name, self.outputs.get_shape().as_list()) def __getitem__(self, key): - net_new = Layer(name=self.name) + net_new = Layer(prev_layer=None, name=self.name) net_new.inputs = self.inputs net_new.outputs = self.outputs[key] @@ -489,8 +502,10 @@ class InputLayer(Layer): """ def __init__(self, inputs=None, name='input'): - Layer.__init__(self, name=name) + + super(InputLayer, self).__init__(prev_layer=None, name=name) logging.info("InputLayer %s: %s" % (self.name, inputs.get_shape())) + self.outputs = inputs self.all_layers = [] self.all_params = [] @@ -527,8 +542,10 @@ class OneHotInputLayer(Layer): """ def __init__(self, inputs=None, depth=None, on_value=None, off_value=None, axis=None, dtype=None, name='input'): - Layer.__init__(self, name=name) + + super(OneHotInputLayer, self).__init__(prev_layer=None, name=name) logging.info("OneHotInputLayer %s: %s" % (self.name, inputs.get_shape())) + # assert depth != None, "depth is not given" if depth is None: logging.info(" [*] depth == None the number of output units is undefined") @@ -649,10 +666,11 @@ def __init__( if nce_b_init_args is None: nce_b_init_args = {} - Layer.__init__(self, name=name) - self.inputs = inputs + super(Word2vecEmbeddingInputlayer, self).__init__(prev_layer=None, name=name) logging.info("Word2vecEmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size)) + self.inputs = inputs + # Look up embeddings for inputs. # Note: a row of 'embeddings' is the vector representation of a word. # for the sake of speed, it is better to slice the embedding matrix @@ -740,10 +758,11 @@ def __init__( if E_init_args is None: E_init_args = {} - Layer.__init__(self, name=name) - self.inputs = inputs + super(EmbeddingInputlayer, self).__init__(prev_layer=None, name=name) logging.info("EmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size)) + self.inputs = inputs + with tf.variable_scope(name): embeddings = tf.get_variable( name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init, dtype=LayersConfig.tf_dtype, **E_init_args) @@ -803,8 +822,9 @@ def __init__( embeddings_kwargs=None, name='average_embedding', ): - # super().__init__(name=name) # dont work for py2 - Layer.__init__(self, name=name) + + super(AverageEmbeddingInputlayer, self).__init__(prev_layer=None, name=name) + logging.info("AverageEmbeddingInputlayer %s: (%d, %d)" % (name, vocabulary_size, embedding_size)) # if embeddings_kwargs is None: # embeddings_kwargs = {} @@ -814,7 +834,6 @@ def __init__( self.inputs = inputs - logging.info("AverageEmbeddingInputlayer %s: (%d, %d)" % (name, vocabulary_size, embedding_size)) with tf.variable_scope(name): self.embeddings = tf.get_variable( name='embeddings', @@ -865,7 +884,7 @@ class DenseLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. n_units : int The number of units of this layer. 
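The `core.Layer` changes above make `prev_layer` an explicit argument (no `None` default) and fix the construction order used by every layer in this patch: call `super().__init__` first, log with the local `name`, then assign `self.inputs`. A hypothetical custom layer written against the refactored base class could follow the same pattern (the `TimesTwoLayer` name and behaviour below are illustrative only):

```python
import tensorflow as tf

from tensorlayer import _logging as logging
from tensorlayer.deprecation import deprecated_alias
from tensorlayer.layers.core import Layer


class TimesTwoLayer(Layer):
    # Hypothetical layer that multiplies its input by a constant factor,
    # using the construction order standardised by this patch.
    @deprecated_alias(layer='prev_layer', end_support_version=1.9)
    def __init__(self, prev_layer, factor=2.0, name='times_two'):
        super(TimesTwoLayer, self).__init__(prev_layer=prev_layer, name=name)
        logging.info("TimesTwoLayer %s: factor:%f" % (name, factor))

        self.inputs = prev_layer.outputs

        with tf.variable_scope(name):
            self.outputs = self.inputs * factor

        self.all_layers.append(self.outputs)
```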
@@ -902,6 +921,7 @@ class DenseLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -913,19 +933,23 @@ def __init__( b_init_args=None, name='dense', ): + + super(DenseLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("DenseLayer %s: %d %s" % (name, n_units, act.__name__)) + + self.inputs = prev_layer.outputs + self.n_units = n_units + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs if self.inputs.get_shape().ndims != 2: raise Exception("The input dimension must be rank 2, please reshape or flatten it") n_in = int(self.inputs.get_shape()[-1]) - self.n_units = n_units - logging.info("DenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__)) + with tf.variable_scope(name): W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) if b_init is not None: @@ -951,7 +975,7 @@ class ReconLayer(DenseLayer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. x_recon : placeholder or tensor The target for reconstruction. @@ -990,6 +1014,7 @@ class ReconLayer(DenseLayer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -998,8 +1023,9 @@ def __init__( act=tf.nn.softplus, name='recon', ): - DenseLayer.__init__(self, prev_layer=prev_layer, n_units=n_units, act=act, name=name) - logging.info("%s is a ReconLayer" % self.name) + super(ReconLayer, self).__init__(prev_layer=prev_layer, n_units=n_units, act=act, name=name) + + logging.info("ReconLayer %s" % self.name) # y : reconstruction outputs; train_params : parameters to train # Note that: train_params = [W_encoder, b_encoder, W_decoder, b_encoder] @@ -1143,7 +1169,7 @@ class DropoutLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. keep : float The keeping probability. @@ -1194,6 +1220,7 @@ class DropoutLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -1203,7 +1230,9 @@ def __init__( seed=None, name='dropout_layer', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + super(DropoutLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("DropoutLayer %s: keep:%f is_fix:%s" % (name, keep, is_fix)) + if is_train is False: logging.info(" skip DropoutLayer") self.outputs = prev_layer.outputs @@ -1212,7 +1241,6 @@ def __init__( # self.all_drop = dict(layer.all_drop) else: self.inputs = prev_layer.outputs - logging.info("DropoutLayer %s: keep:%f is_fix:%s" % (self.name, keep, is_fix)) # The name of placeholder for keep_prob is the same with the name # of the Layer. @@ -1254,7 +1282,7 @@ class GaussianNoiseLayer(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. mean : float The mean. Default is 0. 
@@ -1277,6 +1305,7 @@ class GaussianNoiseLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -1286,7 +1315,8 @@ def __init__( seed=None, name='gaussian_noise_layer', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + super(GaussianNoiseLayer, self).__init__(prev_layer=prev_layer, name=name) + if is_train is False: logging.info(" skip GaussianNoiseLayer") self.outputs = prev_layer.outputs @@ -1314,7 +1344,7 @@ class DropconnectDenseLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. keep : float The keeping probability. @@ -1350,6 +1380,7 @@ class DropconnectDenseLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -1362,18 +1393,20 @@ def __init__( b_init_args=None, name='dropconnect_layer', ): + super(DropconnectDenseLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("DropconnectDenseLayer %s: %d %s" % (name, n_units, act.__name__)) + if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) self.inputs = prev_layer.outputs + if self.inputs.get_shape().ndims != 2: raise Exception("The input dimension must be rank 2") n_in = int(self.inputs.get_shape()[-1]) self.n_units = n_units - logging.info("DropconnectDenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__)) with tf.variable_scope(name): W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) diff --git a/tensorlayer/layers/extend.py b/tensorlayer/layers/extend.py index 2b5190f3c..8c3fa52b1 100644 --- a/tensorlayer/layers/extend.py +++ b/tensorlayer/layers/extend.py @@ -5,6 +5,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'ExpandDimsLayer', 'TileLayer', @@ -18,7 +20,7 @@ class ExpandDimsLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. axis : int The dimension index at which to expand the shape of input. @@ -33,16 +35,18 @@ class ExpandDimsLayer(Layer): ... [None, 100, 1] """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, axis, name='expand_dims', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + super(ExpandDimsLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("ExpandDimsLayer %s: axis:%d" % (name, axis)) + self.inputs = prev_layer.outputs - logging.info("ExpandDimsLayer %s: axis:%d" % (self.name, axis)) with tf.variable_scope(name): try: # TF12 TF1.0 self.outputs = tf.expand_dims(self.inputs, axis=axis) @@ -62,7 +66,7 @@ class TileLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. multiples: tensor Must be one of the following types: int32, int64. @@ -80,16 +84,13 @@ class TileLayer(Layer): ... 
[None, 100, 3] """ - def __init__( - self, - prev_layer=None, - multiples=None, - name='tile', - ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, multiples=None, name='tile'): + super(TileLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("TileLayer %s: multiples:%s" % (name, multiples)) + self.inputs = prev_layer.outputs - logging.info("TileLayer %s: multiples:%s" % (self.name, multiples)) with tf.variable_scope(name): self.outputs = tf.tile(self.inputs, multiples=multiples) # self.all_layers = list(layer.all_layers) diff --git a/tensorlayer/layers/flow_control.py b/tensorlayer/layers/flow_control.py index c3c0afbe0..1f97fc8b4 100644 --- a/tensorlayer/layers/flow_control.py +++ b/tensorlayer/layers/flow_control.py @@ -53,7 +53,7 @@ class MultiplexerLayer(Layer): """ def __init__(self, layers, name='mux_layer'): - Layer.__init__(self, prev_layer=layers, name=name) + super(MultiplexerLayer, self).__init__(prev_layer=layers, name=name) self.n_inputs = len(layers) self.inputs = [] diff --git a/tensorlayer/layers/importer.py b/tensorlayer/layers/importer.py index efc9689e9..3951b01ed 100644 --- a/tensorlayer/layers/importer.py +++ b/tensorlayer/layers/importer.py @@ -6,6 +6,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'LambdaLayer', 'SlimNetsLayer', @@ -19,7 +21,7 @@ class LambdaLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. fn : function The function that applies to the outputs of previous layer. @@ -53,6 +55,7 @@ class LambdaLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -60,13 +63,18 @@ def __init__( fn_args=None, name='lambda_layer', ): + + super(LambdaLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("LambdaLayer %s" % name) + + self.inputs = prev_layer.outputs + if fn_args is None: fn_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) + assert prev_layer is not None assert fn is not None - self.inputs = prev_layer.outputs - logging.info("LambdaLayer %s" % self.name) + with tf.variable_scope(name) as vs: self.outputs = fn(self.inputs, **fn_args) variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) @@ -86,7 +94,7 @@ class SlimNetsLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. slim_layer : a slim network function The network you want to stack onto, end with ``return net, end_points``. 
@@ -101,6 +109,7 @@ class SlimNetsLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -108,15 +117,17 @@ def __init__( slim_args=None, name='tfslim_layer', ): + + super(SlimNetsLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("SlimNetsLayer %s: %s" % (name, slim_layer.__name__)) + + self.inputs = prev_layer.outputs + if slim_layer is None: raise ValueError("slim layer is None") if slim_args is None: slim_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs - logging.info("SlimNetsLayer %s: %s" % (self.name, slim_layer.__name__)) - # with tf.variable_scope(name) as vs: # net, end_points = slim_layer(self.inputs, **slim_args) # slim_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) @@ -152,7 +163,7 @@ class KerasLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer keras_layer : function A tensor in tensor out function for building model. @@ -163,6 +174,7 @@ class KerasLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -170,15 +182,19 @@ def __init__( keras_args=None, name='keras_layer', ): + + super(KerasLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("KerasLayer %s: %s" % (name, keras_layer)) + + self.inputs = prev_layer.outputs + if prev_layer is None: raise ValueError("layer is None") if keras_args is None: keras_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs - logging.info("KerasLayer %s: %s" % (self.name, keras_layer)) - logging.info("This API will be removed, please use LambdaLayer instead.") + logging.warning("This API will be removed, please use LambdaLayer instead.") + with tf.variable_scope(name) as vs: self.outputs = keras_layer(self.inputs, **keras_args) variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) @@ -197,7 +213,7 @@ class EstimatorLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer model_fn : function A tensor in tensor out function for building model. 
@@ -208,6 +224,7 @@ class EstimatorLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -215,14 +232,18 @@ def __init__( args=None, name='estimator_layer', ): + super(EstimatorLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("EstimatorLayer %s: %s" % (name, model_fn)) + + self.inputs = prev_layer.outputs + if model_fn is None: raise ValueError('model fn is None') if args is None: args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs - logging.info("EstimatorLayer %s: %s" % (self.name, model_fn)) - logging.info("This API will be removed, please use LambdaLayer instead.") + + logging.warning("This API will be removed, please use LambdaLayer instead.") + with tf.variable_scope(name) as vs: self.outputs = model_fn(self.inputs, **args) variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) diff --git a/tensorlayer/layers/merge.py b/tensorlayer/layers/merge.py index 84150a95e..66fe05f98 100644 --- a/tensorlayer/layers/merge.py +++ b/tensorlayer/layers/merge.py @@ -55,7 +55,9 @@ def __init__( concat_dim=-1, name='concat_layer', ): - Layer.__init__(self, prev_layer=layers, name=name) + + super(ConcatLayer, self).__init__(prev_layer=layers, name=name) + self.inputs = [] for l in layers: self.inputs.append(l.outputs) @@ -120,8 +122,8 @@ def __init__( act=None, name='elementwise_layer', ): - Layer.__init__(self, prev_layer=layers, name=name) + super(ElementwiseLayer, self).__init__(prev_layer=layers, name=name) logging.info("ElementwiseLayer %s: size:%s fn:%s" % (self.name, layers[0].outputs.get_shape(), combine_fn.__name__)) self.outputs = layers[0].outputs diff --git a/tensorlayer/layers/normalization.py b/tensorlayer/layers/normalization.py index 20f42440e..febc70270 100644 --- a/tensorlayer/layers/normalization.py +++ b/tensorlayer/layers/normalization.py @@ -5,6 +5,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'LocalResponseNormLayer', 'BatchNormLayer', @@ -21,7 +23,7 @@ class LocalResponseNormLayer(Layer): Parameters ----------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a 4D output shape. depth_radius : int Depth radius. 0-D. Half-width of the 1-D normalization window. @@ -36,6 +38,7 @@ class LocalResponseNormLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -45,10 +48,11 @@ def __init__( beta=None, name='lrn_layer', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + super(LocalResponseNormLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("LocalResponseNormLayer %s: depth_radius: %s, bias: %s, alpha: %s, beta: %s" % (name, str(depth_radius), str(bias), str(alpha), str(beta))) + self.inputs = prev_layer.outputs - logging.info("LocalResponseNormLayer %s: depth_radius: %s, bias: %s, alpha: %s, beta: %s" % (self.name, str(depth_radius), str(bias), str(alpha), - str(beta))) + with tf.variable_scope(name): self.outputs = tf.nn.lrn(self.inputs, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) @@ -65,7 +69,7 @@ class BatchNormLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. decay : float A decay factor for `ExponentialMovingAverage`. 
@@ -93,6 +97,7 @@ class BatchNormLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -104,9 +109,11 @@ def __init__( gamma_init=tf.random_normal_initializer(mean=1.0, stddev=0.002), name='batchnorm_layer', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + super(BatchNormLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("BatchNormLayer %s: decay:%f epsilon:%f act:%s is_train:%s" % (name, decay, epsilon, act.__name__, is_train)) + self.inputs = prev_layer.outputs - logging.info("BatchNormLayer %s: decay:%f epsilon:%f act:%s is_train:%s" % (self.name, decay, epsilon, act.__name__, is_train)) + x_shape = self.inputs.get_shape() params_shape = x_shape[-1:] @@ -192,7 +199,7 @@ class InstanceNormLayer(Layer): Parameters ----------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. act : activation function. The activation function of this layer. @@ -203,6 +210,7 @@ class InstanceNormLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -210,10 +218,11 @@ def __init__( epsilon=1e-5, name='instan_norm', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) - self.inputs = prev_layer.outputs + super(InstanceNormLayer, self).__init__(prev_layer=prev_layer, name=name) logging.info("InstanceNormLayer %s: epsilon:%f act:%s" % (self.name, epsilon, act.__name__)) + self.inputs = prev_layer.outputs + with tf.variable_scope(name) as vs: mean, var = tf.nn.moments(self.inputs, [1, 2], keep_dims=True) scale = tf.get_variable( @@ -236,7 +245,7 @@ class LayerNormLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. act : activation function The activation function of this layer. @@ -245,6 +254,7 @@ class LayerNormLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__(self, prev_layer, center=True, @@ -258,9 +268,10 @@ def __init__(self, begin_params_axis=-1, name='layernorm'): - Layer.__init__(self, prev_layer=prev_layer, name=name) + super(LayerNormLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("LayerNormLayer %s: act:%s" % (name, act.__name__)) + self.inputs = prev_layer.outputs - logging.info("LayerNormLayer %s: act:%s" % (self.name, act.__name__)) if tf.__version__ < "1.3": # raise Exception("Please use TF 1.3+") diff --git a/tensorlayer/layers/object_detection.py b/tensorlayer/layers/object_detection.py index 2074dcc8d..2fb1a2ccf 100644 --- a/tensorlayer/layers/object_detection.py +++ b/tensorlayer/layers/object_detection.py @@ -3,6 +3,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'ROIPoolingLayer', ] @@ -14,7 +16,7 @@ class ROIPoolingLayer(Layer): Parameters ----------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. rois : tuple of int Regions of interest in the format of (feature map index, upper left, bottom right). 
@@ -32,6 +34,7 @@ class ROIPoolingLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -40,9 +43,11 @@ def __init__( pool_width=2, name='roipooling_layer', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + super(ROIPoolingLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("ROIPoolingLayer %s: (%d, %d)" % (name, pool_height, pool_width)) + self.inputs = prev_layer.outputs - logging.info("ROIPoolingLayer %s: (%d, %d)" % (self.name, pool_height, pool_width)) + try: from tensorlayer.third_party.roi_pooling.roi_pooling.roi_pooling_ops import roi_pooling except Exception as e: diff --git a/tensorlayer/layers/padding.py b/tensorlayer/layers/padding.py index 6da27cdd8..716f9da10 100644 --- a/tensorlayer/layers/padding.py +++ b/tensorlayer/layers/padding.py @@ -5,6 +5,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'PadLayer', 'ZeroPad1d', @@ -19,7 +21,7 @@ class PadLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. padding : list of lists of 2 ints, or a Tensor of type int32. The int32 values to pad. @@ -35,6 +37,7 @@ class PadLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -42,11 +45,14 @@ def __init__( mode='CONSTANT', name='pad_layer', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + super(PadLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("PadLayer %s: padding:%s mode:%s" % (name, list(padding), mode)) + + self.inputs = prev_layer.outputs + if padding is None: raise Exception("padding should be a Tensor of type int32. see https://www.tensorflow.org/api_docs/python/tf/pad") - self.inputs = prev_layer.outputs - logging.info("PadLayer %s: padding:%s mode:%s" % (self.name, list(padding), mode)) + self.outputs = tf.pad(self.inputs, paddings=padding, mode=mode, name=name) self.all_layers.append(self.outputs) @@ -57,7 +63,7 @@ class ZeroPad1d(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. padding : int, or tuple of 2 ints - If int, zeros to add at the beginning and end of the padding dimension (axis 1). @@ -67,15 +73,21 @@ class ZeroPad1d(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, padding, name='zeropad1d', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + super(ZeroPad1d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("ZeroPad1d %s: padding:%s" % (name, str(padding))) + self.inputs = prev_layer.outputs - logging.info("ZeroPad1d %s: padding:%s" % (self.name, str(padding))) + + if not isinstance(padding, (int, tuple, dict)): + raise AssertionError() + self.outputs = tf.keras.layers.ZeroPadding1D(padding=padding, name=name)(self.inputs) self.all_layers.append(self.outputs) @@ -86,7 +98,7 @@ class ZeroPad2d(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. padding : int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. - If int, the same symmetric padding is applied to width and height. 
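# --- Editor's note, not part of the patch: the padding layers above now type-check their
# `padding` argument; a minimal sketch of the accepted forms (paddings follow tf.pad and the
# tf.keras ZeroPadding wrappers these layers delegate to).
import tensorflow as tf
import tensorlayer as tl

x = tf.placeholder(tf.float32, shape=(None, 100, 100, 3))
n = tl.layers.InputLayer(x, name='in_pad')
p1 = tl.layers.PadLayer(n, padding=[[0, 0], [3, 3], [3, 3], [0, 0]], mode='CONSTANT', name='pad')
p2 = tl.layers.ZeroPad2d(n, padding=2, name='zp_int')                   # int -> symmetric H/W padding
p3 = tl.layers.ZeroPad2d(n, padding=((3, 3), (4, 4)), name='zp_tuple')  # per-side (top, bottom), (left, right)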
@@ -97,15 +109,21 @@ class ZeroPad2d(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, padding, name='zeropad2d', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + super(ZeroPad2d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("ZeroPad2d %s: padding:%s" % (name, str(padding))) + self.inputs = prev_layer.outputs - logging.info("ZeroPad2d %s: padding:%s" % (self.name, str(padding))) + + if not isinstance(padding, (int, tuple)): + raise AssertionError() + self.outputs = tf.keras.layers.ZeroPadding2D(padding=padding, name=name)(self.inputs) self.all_layers.append(self.outputs) @@ -116,7 +134,7 @@ class ZeroPad3d(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. padding : int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. - If int, the same symmetric padding is applied to width and height. @@ -133,8 +151,13 @@ def __init__( padding, name='zeropad3d', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + super(ZeroPad3d, self).__init__(prev_layer=prev_layer, name=name) + logging.info("ZeroPad3d %s: padding:%s" % (name, str(padding))) + self.inputs = prev_layer.outputs - logging.info("ZeroPad3d %s: padding:%s" % (self.name, str(padding))) + + if not isinstance(padding, (int, tuple)): + raise AssertionError() + self.outputs = tf.keras.layers.ZeroPadding3D(padding=padding, name=name)(self.inputs) self.all_layers.append(self.outputs) diff --git a/tensorlayer/layers/pooling.py b/tensorlayer/layers/pooling.py index 97df8e79f..5bd5989dd 100644 --- a/tensorlayer/layers/pooling.py +++ b/tensorlayer/layers/pooling.py @@ -7,6 +7,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'PoolLayer', 'MaxPool1d', @@ -32,7 +34,7 @@ class PoolLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer. ksize : tuple of int The size of the window for each dimension of the input tensor. @@ -54,28 +56,32 @@ class PoolLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, + prev_layer, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='SAME', pool=tf.nn.max_pool, name='pool_layer', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + super(PoolLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("PoolLayer %s: ksize:%s strides:%s padding:%s pool:%s" % (name, str(ksize), str(strides), padding, pool.__name__)) + self.inputs = prev_layer.outputs - logging.info("PoolLayer %s: ksize:%s strides:%s padding:%s pool:%s" % (self.name, str(ksize), str(strides), padding, pool.__name__)) + self.outputs = pool(self.inputs, ksize=ksize, strides=strides, padding=padding, name=name) self.all_layers.append(self.outputs) -def maxpool1d(net, filter_size=3, strides=2, padding='valid', data_format='channels_last', name=None): +@deprecated_alias(net='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release +def maxpool1d(prev_layer, filter_size=3, strides=2, padding='valid', data_format='channels_last', name=None): """Max pooling for 1D signal [batch, length, channel]. Wrapper for `tf.layers.max_pooling1d `__ . Parameters ---------- - net : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 3 [batch, length, channel]. 
filter_size : tuple of int Pooling window size. @@ -98,20 +104,21 @@ def maxpool1d(net, filter_size=3, strides=2, padding='valid', data_format='chann """ logging.info("MaxPool1d %s: filter_size:%s strides:%s padding:%s" % (name, str(filter_size), str(strides), str(padding))) - outputs = tf.layers.max_pooling1d(net.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name) + outputs = tf.layers.max_pooling1d(prev_layer.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name) - net_new = copy.copy(net) + net_new = copy.copy(prev_layer) net_new.outputs = outputs net_new.all_layers.extend([outputs]) return net_new -def meanpool1d(net, filter_size=3, strides=2, padding='valid', data_format='channels_last', name=None): +@deprecated_alias(net='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release +def meanpool1d(prev_layer, filter_size=3, strides=2, padding='valid', data_format='channels_last', name=None): """Mean pooling for 1D signal [batch, length, channel]. Wrapper for `tf.layers.average_pooling1d `__ . Parameters ------------ - net : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 3 [batch, length, channel]. filter_size : tuple of int Pooling window size. @@ -134,20 +141,21 @@ def meanpool1d(net, filter_size=3, strides=2, padding='valid', data_format='chan """ logging.info("MeanPool1d %s: filter_size:%s strides:%s padding:%s" % (name, str(filter_size), str(strides), str(padding))) - outputs = tf.layers.average_pooling1d(net.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name) + outputs = tf.layers.average_pooling1d(prev_layer.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name) - net_new = copy.copy(net) + net_new = copy.copy(prev_layer) net_new.outputs = outputs net_new.all_layers.extend([outputs]) return net_new -def maxpool2d(net, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='maxpool'): +@deprecated_alias(net='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release +def maxpool2d(prev_layer, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='maxpool'): """Max pooling for 2D image [batch, height, width, channel]. Wrapper for :class:`PoolLayer`. Parameters ----------- - net : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 4 [batch, height, width, channel]. filter_size : tuple of int (height, width) for filter size. @@ -167,24 +175,28 @@ def maxpool2d(net, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='max if strides is None: strides = filter_size if tf.__version__ > '1.5': - outputs = tf.layers.max_pooling2d(net.outputs, filter_size, strides, padding=padding, data_format='channels_last', name=name) - net_new = copy.copy(net) + outputs = tf.layers.max_pooling2d(prev_layer.outputs, filter_size, strides, padding=padding, data_format='channels_last', name=name) + net_new = copy.copy(prev_layer) net_new.outputs = outputs net_new.all_layers.extend([outputs]) return net_new else: + assert len(strides) == 2, "len(strides) should be 2, MaxPool2d and PoolLayer are different." 
- net = PoolLayer( - net, ksize=[1, filter_size[0], filter_size[1], 1], strides=[1, strides[0], strides[1], 1], padding=padding, pool=tf.nn.max_pool, name=name) - return net + prev_layer = PoolLayer( + prev_layer, ksize=[1, filter_size[0], filter_size[1], 1], strides=[1, strides[0], strides[1], 1], padding=padding, pool=tf.nn.max_pool, name=name) + + return prev_layer -def meanpool2d(net, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='meanpool'): + +@deprecated_alias(net='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release +def meanpool2d(prev_layer, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='meanpool'): """Mean pooling for 2D image [batch, height, width, channel]. Wrapper for :class:`PoolLayer`. Parameters ----------- - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 4 [batch, height, width, channel]. filter_size : tuple of int (height, width) for filter size. @@ -204,16 +216,18 @@ def meanpool2d(net, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='me if strides is None: strides = filter_size if tf.__version__ > '1.5': - outputs = tf.layers.average_pooling2d(net.outputs, filter_size, strides, padding=padding, data_format='channels_last', name=name) - net_new = copy.copy(net) + outputs = tf.layers.average_pooling2d(prev_layer.outputs, filter_size, strides, padding=padding, data_format='channels_last', name=name) + net_new = copy.copy(prev_layer) net_new.outputs = outputs net_new.all_layers.extend([outputs]) return net_new else: + assert len(strides) == 2, "len(strides) should be 2, MeanPool2d and PoolLayer are different." - net = PoolLayer( - net, ksize=[1, filter_size[0], filter_size[1], 1], strides=[1, strides[0], strides[1], 1], padding=padding, pool=tf.nn.avg_pool, name=name) - return net + + prev_layer = PoolLayer( + prev_layer, ksize=[1, filter_size[0], filter_size[1], 1], strides=[1, strides[0], strides[1], 1], padding=padding, pool=tf.nn.avg_pool, name=name) + return prev_layer # def maxpool3d(net, filter_size=(3, 3, 3), strides=(2, 2, 2), padding='valid', data_format='channels_last', name='maxpool3d'): @@ -222,7 +236,7 @@ class MaxPool3d(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 5 [batch, depth, height, width, channel]. filter_size : tuple of int Pooling window size. @@ -245,12 +259,14 @@ class MaxPool3d(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__(self, prev_layer, filter_size=(3, 3, 3), strides=(2, 2, 2), padding='valid', data_format='channels_last', name='maxpool3d'): - # check layer name (fixed) - Layer.__init__(self, prev_layer=prev_layer, name=name) - # the input of this layer is the output of previous layer (fixed) - self.inputs = prev_layer.outputs + + super(MaxPool3d, self).__init__(prev_layer=prev_layer, name=name) logging.info("MaxPool3d %s: filter_size:%s strides:%s padding:%s" % (name, str(filter_size), str(strides), str(padding))) + + self.inputs = prev_layer.outputs + self.outputs = tf.layers.max_pooling3d(prev_layer.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name) # update layer (customized) self.all_layers.append(self.outputs) @@ -262,7 +278,7 @@ class MeanPool3d(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 5 [batch, depth, height, width, channel]. 
filter_size : tuple of int Pooling window size. @@ -285,15 +301,17 @@ class MeanPool3d(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__(self, prev_layer, filter_size=(3, 3, 3), strides=(2, 2, 2), padding='valid', data_format='channels_last', name='meanpool3d'): - # check layer name (fixed) - Layer.__init__(self, prev_layer=prev_layer, name=name) - # the input of this layer is the output of previous layer (fixed) - self.inputs = prev_layer.outputs - # print out info (customized) + + super(MeanPool3d, self).__init__(prev_layer=prev_layer, name=name) logging.info("MeanPool3d %s: filter_size:%s strides:%s padding:%s" % (name, str(filter_size), str(strides), str(padding))) + + self.inputs = prev_layer.outputs + # operation (customized) self.outputs = tf.layers.average_pooling3d(prev_layer.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name) + # update layer (customized) self.all_layers.append(self.outputs) @@ -303,7 +321,7 @@ class GlobalMaxPool1d(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 3 [batch, length, channel]. name : str A unique layer name. @@ -316,19 +334,16 @@ class GlobalMaxPool1d(Layer): ... [None, 30] """ - def __init__( - self, - prev_layer=None, - name='globalmaxpool1d', - ): - # check layer name (fixed) - Layer.__init__(self, prev_layer=prev_layer, name=name) - # the input of this layer is the output of previous layer (fixed) - self.inputs = prev_layer.outputs - # print out info (customized) + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, name='globalmaxpool1d'): + super(GlobalMaxPool1d, self).__init__(prev_layer=prev_layer, name=name) logging.info("GlobalMaxPool1d %s" % name) + + self.inputs = prev_layer.outputs + # operation (customized) self.outputs = tf.reduce_max(prev_layer.outputs, axis=1, name=name) + # update layer (customized) self.all_layers.append(self.outputs) @@ -338,7 +353,7 @@ class GlobalMeanPool1d(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 3 [batch, length, channel]. name : str A unique layer name. @@ -351,19 +366,16 @@ class GlobalMeanPool1d(Layer): ... [None, 30] """ - def __init__( - self, - prev_layer=None, - name='globalmeanpool1d', - ): - # check layer name (fixed) - Layer.__init__(self, prev_layer=prev_layer, name=name) - # the input of this layer is the output of previous layer (fixed) - self.inputs = prev_layer.outputs - # print out info (customized) + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, name='globalmeanpool1d'): + super(GlobalMeanPool1d, self).__init__(prev_layer=prev_layer, name=name) logging.info("GlobalMeanPool1d %s" % name) + + self.inputs = prev_layer.outputs + # operation (customized) self.outputs = tf.reduce_mean(prev_layer.outputs, axis=1, name=name) + # update layer (customized) self.all_layers.append(self.outputs) @@ -373,7 +385,7 @@ class GlobalMaxPool2d(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 4 [batch, height, width, channel]. name : str A unique layer name. @@ -386,19 +398,16 @@ class GlobalMaxPool2d(Layer): ... 
[None, 30] """ - def __init__( - self, - prev_layer=None, - name='globalmaxpool2d', - ): - # check layer name (fixed) - Layer.__init__(self, prev_layer=prev_layer, name=name) - # the input of this layer is the output of previous layer (fixed) - self.inputs = prev_layer.outputs - # print out info (customized) + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, name='globalmaxpool2d'): + super(GlobalMaxPool2d, self).__init__(prev_layer=prev_layer, name=name) logging.info("GlobalMaxPool2d %s" % name) + + self.inputs = prev_layer.outputs + # operation (customized) self.outputs = tf.reduce_max(prev_layer.outputs, axis=[1, 2], name=name) + # update layer (customized) self.all_layers.append(self.outputs) @@ -408,7 +417,7 @@ class GlobalMeanPool2d(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 4 [batch, height, width, channel]. name : str A unique layer name. @@ -421,19 +430,16 @@ class GlobalMeanPool2d(Layer): ... [None, 30] """ - def __init__( - self, - prev_layer=None, - name='globalmeanpool2d', - ): - # check layer name (fixed) - Layer.__init__(self, prev_layer=prev_layer, name=name) - # the input of this layer is the output of previous layer (fixed) - self.inputs = prev_layer.outputs - # print out info (customized) + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, name='globalmeanpool2d'): + super(GlobalMeanPool2d, self).__init__(prev_layer=prev_layer, name=name) logging.info("GlobalMeanPool2d %s" % name) + + self.inputs = prev_layer.outputs + # operation (customized) self.outputs = tf.reduce_mean(prev_layer.outputs, axis=[1, 2], name=name) + # update layer (customized) self.all_layers.append(self.outputs) @@ -443,7 +449,7 @@ class GlobalMaxPool3d(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 5 [batch, depth, height, width, channel]. name : str A unique layer name. @@ -456,19 +462,18 @@ class GlobalMaxPool3d(Layer): ... [None, 30] """ - def __init__( - self, - prev_layer=None, - name='globalmaxpool3d', - ): - # check layer name (fixed) - Layer.__init__(self, prev_layer=prev_layer, name=name) - # the input of this layer is the output of previous layer (fixed) + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, name='globalmaxpool3d'): + super(GlobalMaxPool3d, self).__init__(prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + # print out info (customized) logging.info("GlobalMaxPool3d %s" % name) + # operation (customized) self.outputs = tf.reduce_max(prev_layer.outputs, axis=[1, 2, 3], name=name) + # update layer (customized) self.all_layers.append(self.outputs) @@ -478,7 +483,7 @@ class GlobalMeanPool3d(Layer): Parameters ------------ - layer : :class:`Layer` + prev_layer : :class:`Layer` The previous layer with a output rank as 5 [batch, depth, height, width, channel]. name : str A unique layer name. @@ -491,19 +496,16 @@ class GlobalMeanPool3d(Layer): ... 
[None, 30] """ - def __init__( - self, - prev_layer=None, - name='globalmeanpool3d', - ): - # check layer name (fixed) - Layer.__init__(self, prev_layer=prev_layer, name=name) - # the input of this layer is the output of previous layer (fixed) - self.inputs = prev_layer.outputs - # print out info (customized) + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, name='globalmeanpool3d'): + super(GlobalMeanPool3d, self).__init__(prev_layer=prev_layer, name=name) logging.info("GlobalMeanPool3d %s" % name) + + self.inputs = prev_layer.outputs + # operation (customized) self.outputs = tf.reduce_mean(prev_layer.outputs, axis=[1, 2, 3], name=name) + # update layer (customized) self.all_layers.append(self.outputs) diff --git a/tensorlayer/layers/recurrent.py b/tensorlayer/layers/recurrent.py index 5d6208e46..09affd4c7 100644 --- a/tensorlayer/layers/recurrent.py +++ b/tensorlayer/layers/recurrent.py @@ -7,6 +7,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'RNNLayer', 'BiRNNLayer', @@ -31,7 +33,7 @@ class RNNLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. cell_fn : TensorFlow cell function A TensorFlow core RNN cell @@ -130,6 +132,7 @@ class RNNLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -143,10 +146,12 @@ def __init__( return_seq_2d=False, name='rnn', ): + super(RNNLayer, self).__init__(prev_layer=prev_layer, name=name) + + self.inputs = prev_layer.outputs + if cell_init_args is None: cell_init_args = {} - - Layer.__init__(self, prev_layer=prev_layer, name=name) if cell_fn is None: raise Exception("Please put in cell_fn") if 'GRU' in cell_fn.__name__: @@ -155,8 +160,6 @@ def __init__( except Exception: logging.warning('pop state_is_tuple fails.') - self.inputs = prev_layer.outputs - logging.info("RNNLayer %s: n_hidden:%d n_steps:%d in_dim:%d in_shape:%s cell_fn:%s " % (self.name, n_hidden, n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__)) # You can get the dimension by .get_shape() or ._shape, and check the @@ -255,7 +258,7 @@ class BiRNNLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. cell_fn : TensorFlow cell function A TensorFlow core RNN cell. 
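# --- Editor's note, not part of the patch: a minimal sketch of the refactored RNNLayer under
# the new `prev_layer` naming; tf.contrib.rnn.BasicLSTMCell is assumed as the cell_fn (TF 1.x)
# and the sizes are illustrative only.
import tensorflow as tf
import tensorlayer as tl

batch_size, n_steps, n_features = 32, 5, 50
x = tf.placeholder(tf.float32, shape=(batch_size, n_steps, n_features))
net = tl.layers.InputLayer(x, name='in_rnn')
net = tl.layers.RNNLayer(net, cell_fn=tf.contrib.rnn.BasicLSTMCell, n_hidden=64,
                         n_steps=n_steps, return_last=True, name='lstm')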
@@ -317,6 +320,7 @@ class BiRNNLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -333,10 +337,12 @@ def __init__( return_seq_2d=False, name='birnn', ): + super(BiRNNLayer, self).__init__(prev_layer=prev_layer, name=name) + + self.inputs = prev_layer.outputs + if cell_init_args is None: cell_init_args = {'state_is_tuple': True} # 'use_peepholes': True, - - Layer.__init__(self, prev_layer=prev_layer, name=name) if cell_fn is None: raise Exception("Please put in cell_fn") if 'GRU' in cell_fn.__name__: @@ -345,8 +351,6 @@ def __init__( except Exception: logging.warning("pop state_is_tuple fails.") - self.inputs = prev_layer.outputs - logging.info("BiRNNLayer %s: n_hidden:%d n_steps:%d in_dim:%d in_shape:%s cell_fn:%s dropout:%s n_layer:%d " % (self.name, n_hidden, n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(), @@ -638,7 +642,7 @@ class ConvLSTMLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer cell_shape : tuple of int The shape of each cell width * height @@ -687,6 +691,7 @@ class ConvLSTMLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -701,8 +706,10 @@ def __init__( return_seq_2d=False, name='convlstm', ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + super(ConvLSTMLayer, self).__init__(prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + logging.info("ConvLSTMLayer %s: feature_map:%d, n_steps:%d, " "in_dim:%d %s, cell_fn:%s " % (self.name, feature_map, n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__)) # You can get the dimension by .get_shape() or ._shape, and check the @@ -922,7 +929,7 @@ class DynamicRNNLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer cell_fn : TensorFlow cell function A TensorFlow core RNN cell @@ -1016,6 +1023,7 @@ class DynamicRNNLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -1032,14 +1040,16 @@ def __init__( dynamic_rnn_init_args=None, name='dyrnn', ): + super(DynamicRNNLayer, self).__init__(prev_layer=prev_layer, name=name) + + self.inputs = prev_layer.outputs + if dynamic_rnn_init_args is None: dynamic_rnn_init_args = {} if cell_init_args is None: cell_init_args = {'state_is_tuple': True} if return_last is None: return_last = True - - Layer.__init__(self, prev_layer=prev_layer, name=name) if cell_fn is None: raise Exception("Please put in cell_fn") if 'GRU' in cell_fn.__name__: @@ -1047,7 +1057,6 @@ def __init__( cell_init_args.pop('state_is_tuple') except Exception: logging.warning("pop state_is_tuple fails.") - self.inputs = prev_layer.outputs logging.info("DynamicRNNLayer %s: n_hidden:%d, in_dim:%d in_shape:%s cell_fn:%s dropout:%s n_layer:%d" % (self.name, n_hidden, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__, dropout, n_layer)) @@ -1196,7 +1205,7 @@ class BiDynamicRNNLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. 
cell_fn : TensorFlow cell function A TensorFlow core RNN cell @@ -1269,6 +1278,7 @@ class BiDynamicRNNLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -1286,12 +1296,14 @@ def __init__( dynamic_rnn_init_args=None, name='bi_dyrnn_layer', ): + super(BiDynamicRNNLayer, self).__init__(prev_layer=prev_layer, name=name) + + self.inputs = prev_layer.outputs + if cell_init_args is None: cell_init_args = {'state_is_tuple': True} if dynamic_rnn_init_args is None: dynamic_rnn_init_args = {} - - Layer.__init__(self, prev_layer=prev_layer, name=name) if cell_fn is None: raise Exception("Please put in cell_fn") if 'GRU' in cell_fn.__name__: @@ -1299,7 +1311,6 @@ def __init__( cell_init_args.pop('state_is_tuple') except Exception: logging.warning("pop state_is_tuple fails.") - self.inputs = prev_layer.outputs logging.info("BiDynamicRNNLayer %s: n_hidden:%d in_dim:%d in_shape:%s cell_fn:%s dropout:%s n_layer:%d" % (self.name, n_hidden, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__, dropout, n_layer)) @@ -1573,10 +1584,11 @@ def __init__( return_seq_2d=False, name='seq2seq', ): + super(Seq2Seq, self).__init__(prev_layer=None, name=name) + if cell_init_args is None: cell_init_args = {'state_is_tuple': True} - Layer.__init__(self, name=name) if cell_fn is None: raise Exception("Please put in cell_fn") if 'GRU' in cell_fn.__name__: diff --git a/tensorlayer/layers/shape.py b/tensorlayer/layers/shape.py index 71a2fa1e9..d712acbbb 100644 --- a/tensorlayer/layers/shape.py +++ b/tensorlayer/layers/shape.py @@ -5,6 +5,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'FlattenLayer', 'ReshapeLayer', @@ -20,7 +22,7 @@ class FlattenLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. name : str A unique layer name. @@ -34,25 +36,25 @@ class FlattenLayer(Layer): """ - def __init__( - self, - prev_layer, - name='flatten', - ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, name='flatten'): + super(FlattenLayer, self).__init__(prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + self.outputs = flatten_reshape(self.inputs, name=name) self.n_units = int(self.outputs.get_shape()[-1]) - logging.info("FlattenLayer %s: %d" % (self.name, self.n_units)) self.all_layers.append(self.outputs) + logging.info("FlattenLayer %s: %d" % (self.name, self.n_units)) + class ReshapeLayer(Layer): """A layer that reshapes a given tensor. Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer shape : tuple of int The output shape, see ``tf.reshape``. 
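# --- Editor's note, not part of the patch: quick sketch of the shape layers touched in this
# file; ReshapeLayer now rejects an empty `shape` argument.
import tensorflow as tf
import tensorlayer as tl

x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
net = tl.layers.InputLayer(x, name='in_shape')
net = tl.layers.FlattenLayer(net, name='flat')                         # -> (?, 784)
net = tl.layers.ReshapeLayer(net, shape=(-1, 28, 28, 1), name='back')  # -> (?, 28, 28, 1)
net = tl.layers.TransposeLayer(net, perm=[0, 2, 1, 3], name='trans')   # swap height and width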
@@ -69,18 +71,20 @@ class ReshapeLayer(Layer): """ - def __init__( - self, - prev_layer, - shape, - name='reshape', - ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, shape, name='reshape'): + super(ReshapeLayer, self).__init__(prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + + if not shape: + raise ValueError("Shape list can not be empty") + self.outputs = tf.reshape(self.inputs, shape=shape, name=name) - logging.info("ReshapeLayer %s: %s" % (self.name, self.outputs.get_shape())) self.all_layers.append(self.outputs) + logging.info("ReshapeLayer %s: %s" % (self.name, self.outputs.get_shape())) + class TransposeLayer(Layer): """A layer that transposes the dimension of a tensor. @@ -89,7 +93,7 @@ class TransposeLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer perm: list of int The permutation of the dimensions, similar with ``numpy.transpose``. @@ -105,16 +109,15 @@ class TransposeLayer(Layer): """ - def __init__( - self, - prev_layer, - perm, - name='transpose', - ): - Layer.__init__(self, prev_layer=prev_layer, name=name) + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__(self, prev_layer, perm, name='transpose'): + + super(TransposeLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("TransposeLayer %s: perm:%s" % (name, perm)) + self.inputs = prev_layer.outputs + assert perm is not None - logging.info("TransposeLayer %s: perm:%s" % (self.name, perm)) self.outputs = tf.transpose(self.inputs, perm=perm, name=name) self.all_layers.append(self.outputs) diff --git a/tensorlayer/layers/spatial_transformer.py b/tensorlayer/layers/spatial_transformer.py index 093ce5c91..3ab4fb3f4 100644 --- a/tensorlayer/layers/spatial_transformer.py +++ b/tensorlayer/layers/spatial_transformer.py @@ -7,6 +7,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'transformer', 'batch_transformer', @@ -207,7 +209,7 @@ class SpatialTransformer2dAffineLayer(Layer): Parameters ----------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer. theta_layer : :class:`Layer` The localisation network. @@ -224,19 +226,23 @@ class SpatialTransformer2dAffineLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, - prev_layer=None, - theta_layer=None, + prev_layer, + theta_layer, out_size=None, - name='sapatial_trans_2d_affine', + name='spatial_trans_2d_affine', ): - if out_size is None: - out_size = [40, 40] - Layer.__init__(self, prev_layer=[prev_layer, theta_layer], name=name) + super(SpatialTransformer2dAffineLayer, self).__init__(prev_layer=[prev_layer, theta_layer], name=name) + self.inputs = prev_layer.outputs self.theta_layer = theta_layer + + if out_size is None: + out_size = [40, 40] + logging.info("SpatialTransformer2dAffineLayer %s: in_size:%s out_size:%s" % (name, self.inputs.get_shape().as_list(), out_size)) with tf.variable_scope(name) as vs: diff --git a/tensorlayer/layers/special_activation.py b/tensorlayer/layers/special_activation.py index 3aa50b1a0..4d5a472dd 100644 --- a/tensorlayer/layers/special_activation.py +++ b/tensorlayer/layers/special_activation.py @@ -5,6 +5,8 @@ from .. 
import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'PReluLayer', ] @@ -16,7 +18,7 @@ class PReluLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer。 channel_shared : boolean If True, single weight is shared by all channels. @@ -33,6 +35,7 @@ class PReluLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -41,12 +44,15 @@ def __init__( a_init_args=None, # restore = True, name="prelu_layer"): + if a_init_args is None: a_init_args = {} - Layer.__init__(self, prev_layer=prev_layer, name=name) + super(PReluLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("PReluLayer %s: channel_shared:%s" % (name, channel_shared)) + self.inputs = prev_layer.outputs - logging.info("PReluLayer %s: channel_shared:%s" % (self.name, channel_shared)) + if channel_shared: w_shape = (1, ) else: diff --git a/tensorlayer/layers/stack.py b/tensorlayer/layers/stack.py index 654eb0537..dd9bde8be 100644 --- a/tensorlayer/layers/stack.py +++ b/tensorlayer/layers/stack.py @@ -5,6 +5,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'StackLayer', 'UnStackLayer', @@ -42,15 +44,16 @@ def __init__( axis=1, name='stack', ): - Layer.__init__(self, prev_layer=layers, name=name) + + super(StackLayer, self).__init__(prev_layer=layers, name=name) + logging.info("StackLayer %s: axis: %d" % (name, axis)) + self.inputs = [] for l in layers: self.inputs.append(l.outputs) self.outputs = tf.stack(self.inputs, axis=axis, name=name) - logging.info("StackLayer %s: axis: %d" % (self.name, axis)) - # self.all_layers = list(layers[0].all_layers) # self.all_params = list(layers[0].all_params) # self.all_drop = dict(layers[0].all_drop) @@ -66,13 +69,14 @@ def __init__( self.all_layers.append(self.outputs) -def unstack_layer(layer, num=None, axis=0, name='unstack'): +@deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release +def unstack_layer(prev_layer, num=None, axis=0, name='unstack'): """ It is layer for unstacking the given dimension of a rank-R tensor into rank-(R-1) tensors., see `tf.unstack() `__. Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer num : int or None The length of the dimension axis. Automatically inferred if None (the default). @@ -87,7 +91,7 @@ def unstack_layer(layer, num=None, axis=0, name='unstack'): The list of layer objects unstacked from the input. """ - inputs = layer.outputs + inputs = prev_layer.outputs with tf.variable_scope(name): outputs = tf.unstack(inputs, num=num, axis=axis) @@ -101,7 +105,7 @@ def unstack_layer(layer, num=None, axis=0, name='unstack'): full_name = name for i, _v in enumerate(outputs): - n = Layer(prev_layer=layer, name=full_name + str(i)) + n = Layer(prev_layer=prev_layer, name=full_name + str(i)) n.outputs = outputs[i] # n.all_layers = list(layer.all_layers) # n.all_params = list(layer.all_params) diff --git a/tensorlayer/layers/super_resolution.py b/tensorlayer/layers/super_resolution.py index d82cfd4a5..f00a6836b 100644 --- a/tensorlayer/layers/super_resolution.py +++ b/tensorlayer/layers/super_resolution.py @@ -5,19 +5,22 @@ from .. 
import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'SubpixelConv1d', 'SubpixelConv2d', ] -def subpixel_conv2d(net, scale=2, n_out_channel=None, act=tf.identity, name='subpixel_conv2d'): +@deprecated_alias(net='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release +def subpixel_conv2d(prev_layer, scale=2, n_out_channel=None, act=tf.identity, name='subpixel_conv2d'): """It is a 2D sub-pixel up-sampling layer, usually be used for Super-Resolution applications, see `SRGAN `__ for example. Parameters ------------ - net : :class:`Layer` + prev_layer : :class:`Layer` Previous layer, scale : int The up-scaling ratio, a wrong setting will lead to dimension size error. @@ -92,26 +95,27 @@ def _PS(X, r, n_out_channels): logging.info(_err_log) return X - inputs = net.outputs + inputs = prev_layer.outputs if n_out_channel is None: assert int(inputs.get_shape()[-1]) / (scale**2) % 1 == 0, _err_log n_out_channel = int(int(inputs.get_shape()[-1]) / (scale**2)) logging.info("SubpixelConv2d %s: scale: %d n_out_channel: %s act: %s" % (name, scale, n_out_channel, act.__name__)) - net_new = Layer(prev_layer=net, name=name) #whole_name) + net_new = Layer(prev_layer=prev_layer, name=name) # with tf.name_scope(name): with tf.variable_scope(name): net_new.outputs = act(_PS(inputs, r=scale, n_out_channels=n_out_channel)) - # net_new.all_layers = list(net.all_layers) - # net_new.all_params = list(net.all_params) - # net_new.all_drop = dict(net.all_drop) + # net_new.all_layers = list(prev_layer.all_layers) + # net_new.all_params = list(prev_layer.all_params) + # net_new.all_drop = dict(prev_layer.all_drop) net_new.all_layers.append(net_new.outputs) return net_new -def subpixel_conv1d(net, scale=2, act=tf.identity, name='subpixel_conv1d'): +@deprecated_alias(net='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release +def subpixel_conv1d(prev_layer, scale=2, act=tf.identity, name='subpixel_conv1d'): """It is a 1D sub-pixel up-sampling layer. Calls a TensorFlow function that directly implements this functionality. @@ -155,14 +159,14 @@ def _PS(I, r): logging.info("SubpixelConv1d %s: scale: %d act: %s" % (name, scale, act.__name__)) - inputs = net.outputs - net_new = Layer(prev_layer=net, name=name) + inputs = prev_layer.outputs + net_new = Layer(prev_layer=prev_layer, name=name) with tf.name_scope(name): net_new.outputs = act(_PS(inputs, r=scale)) - # net_new.all_layers = list(net.all_layers) - # net_new.all_params = list(net.all_params) - # net_new.all_drop = dict(net.all_drop) + # net_new.all_layers = list(prev_layer.all_layers) + # net_new.all_params = list(prev_layer.all_params) + # net_new.all_drop = dict(prev_layer.all_drop) net_new.all_layers.append(net_new.outputs) return net_new diff --git a/tensorlayer/layers/time_distribution.py b/tensorlayer/layers/time_distribution.py index 39a9d2193..fcec358e1 100644 --- a/tensorlayer/layers/time_distribution.py +++ b/tensorlayer/layers/time_distribution.py @@ -5,6 +5,8 @@ from .. import _logging as logging from .core import * +from ..deprecation import deprecated_alias + __all__ = [ 'TimeDistributedLayer', ] @@ -18,7 +20,7 @@ class TimeDistributedLayer(Layer): Parameters ---------- - layer : :class:`Layer` + prev_layer : :class:`Layer` Previous layer with output size of (batch_size, length, dim). layer_class : a :class:`Layer` class The layer class name. 
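# --- Editor's note, not part of the patch: sketch of the sub-pixel up-sampling wrapper whose
# first argument is renamed above (net -> prev_layer); the input channel count must be
# divisible by scale**2, here 64 / 2**2 = 16 output channels.
import tensorflow as tf
import tensorlayer as tl

x = tf.placeholder(tf.float32, shape=(None, 25, 25, 64))
net = tl.layers.InputLayer(x, name='in_sr')
net = tl.layers.SubpixelConv2d(net, scale=2, n_out_channel=None, act=tf.nn.relu, name='pixelshuffle')
# resulting output shape: (?, 50, 50, 16)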
@@ -46,6 +48,7 @@ class TimeDistributedLayer(Layer): """ + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, @@ -53,14 +56,15 @@ def __init__( args=None, name='time_distributed', ): + super(TimeDistributedLayer, self).__init__(prev_layer=prev_layer, name=name) + logging.info("TimeDistributedLayer %s: layer_class:%s args:%s" % (self.name, layer_class.__name__, args)) + if args is None: args = {} if not isinstance(args, dict): raise TypeError("'args' must be a dict.") - Layer.__init__(self, prev_layer=prev_layer, name=name) self.inputs = prev_layer.outputs - logging.info("TimeDistributedLayer %s: layer_class:%s args:%s" % (self.name, layer_class.__name__, args)) if not isinstance(self.inputs, tf.Tensor): self.inputs = tf.transpose(tf.stack(self.inputs), [1, 0, 2]) diff --git a/tests/test_layers_basic.py b/tests/test_layers_basic.py index 1c246a550..09f4fd2c9 100644 --- a/tests/test_layers_basic.py +++ b/tests/test_layers_basic.py @@ -3,25 +3,25 @@ x = tf.placeholder(tf.float32, [None, 100]) n = tl.layers.InputLayer(x, name='in') -n = tl.layers.DenseLayer(n, 80, name='d1') -n = tl.layers.DenseLayer(n, 80, name='d2') +n = tl.layers.DenseLayer(n, n_units=80, name='d1') +n = tl.layers.DenseLayer(n, n_units=80, name='d2') print(n) n.print_layers() n.print_params(False) print(n.count_params()) if n.count_params() != 14560: - raise Exception("params dont match") + raise Exception("params do not match") shape = n.outputs.get_shape().as_list() if shape[-1] != 80: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(n.all_layers) != 2: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(n.all_params) != 4: - raise Exception("params dont match") + raise Exception("params do not match") for l in n: print(l) @@ -32,7 +32,7 @@ shape = n2.outputs.get_shape().as_list() if shape[-1] != 30: - raise Exception("shape dont match") + raise Exception("shape do not match") for l in n2: print(l) diff --git a/tests/test_layers_convolution.py b/tests/test_layers_convolution.py index 830a23090..b5560352d 100644 --- a/tests/test_layers_convolution.py +++ b/tests/test_layers_convolution.py @@ -8,13 +8,13 @@ n = tl.layers.Conv1dLayer(nin, shape=(5, 1, 32), stride=2) shape = n.outputs.get_shape().as_list() if (shape[1] != 50) or (shape[2] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") n = tl.layers.Conv1d(nin, n_filter=32, filter_size=5, stride=2) print(n) shape = n.outputs.get_shape().as_list() if (shape[1] != 50) or (shape[2] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") # AtrousConv1dLayer @@ -33,29 +33,29 @@ print(n) shape = n.outputs.get_shape().as_list() if (shape[1] != 50) or (shape[2] != 50) or (shape[3] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") -n = tl.layers.Conv2d(nin, 32, (3, 3), (2, 2), act=None, name='conv2d') +n = tl.layers.Conv2d(nin, n_filter=32, filter_size=(3, 3), strides=(2, 2), act=None, name='conv2d') shape = n.outputs.get_shape().as_list() if (shape[1] != 50) or (shape[2] != 50) or (shape[3] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") n.print_params(False) if len(n.all_params) != 2: - raise Exception("params dont match") + raise Exception("params do not match") -n = tl.layers.Conv2d(nin, 32, (3, 3), (2, 2), act=tf.nn.relu, b_init=None, name='conv2d_no_bias') +n = tl.layers.Conv2d(nin, 
n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, b_init=None, name='conv2d_no_bias') print(n) shape = n.outputs.get_shape().as_list() if (shape[1] != 50) or (shape[2] != 50) or (shape[3] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") if len(n.all_params) != 1: - raise Exception("params dont match") + raise Exception("params do not match") n = tl.layers.DeConv2dLayer(nin, shape=(5, 5, 32, 3), output_shape=(100, 200, 200, 32), strides=(1, 2, 2, 1), name='deconv2dlayer') print(n) shape = n.outputs.get_shape().as_list() if (shape[1] != 200) or (shape[2] != 200) or (shape[3] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") print(nin.outputs) n = tl.layers.DeConv2d(nin, n_filter=32, filter_size=(3, 3), strides=(2, 2), name='DeConv2d') @@ -63,20 +63,20 @@ shape = n.outputs.get_shape().as_list() # if (shape[1] != 200) or (shape[2] != 200) or (shape[3] != 32): # TODO: why [None None None 32] ? if (shape[3] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") -n = tl.layers.DepthwiseConv2d(nin, (3, 3), (2, 2), tf.nn.relu, depth_multiplier=2, name='depthwise') +n = tl.layers.DepthwiseConv2d(nin, shape=(3, 3), strides=(2, 2), act=tf.nn.relu, depth_multiplier=2, name='depthwise') print(n) shape = n.outputs.get_shape().as_list() if (shape[1] != 50) or (shape[2] != 50) or (shape[3] != 6): - raise Exception("shape dont match") + raise Exception("shape do not match") -n = tl.layers.Conv2d(nin, 32, (3, 3), (2, 2), act=tf.nn.relu, name='conv2d2') -n = tl.layers.GroupConv2d(n, 32, (3, 3), (2, 2), name='group') +n = tl.layers.Conv2d(nin, n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, name='conv2d2') +n = tl.layers.GroupConv2d(n, n_filter=32, filter_size=(3, 3), strides=(2, 2), name='group') print(n) shape = n.outputs.get_shape().as_list() if (shape[1] != 25) or (shape[2] != 25) or (shape[3] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") # n = UpSampling2dLayer # n = DownSampling2dLayer @@ -90,22 +90,22 @@ # AtrousConv2dLayer -n = tl.layers.SeparableConv2d(nin, 32, (3, 3), (1, 1), tf.nn.relu, name='seperable1') +n = tl.layers.SeparableConv2d(nin, n_filter=32, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, name='seperable1') n.print_layers() n.print_params(False) shape = n.outputs.get_shape().as_list() if shape[1:] != [98, 98, 32]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(n.all_layers) != 1: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(n.all_params) != 3: - raise Exception("params dont match") + raise Exception("params do not match") if n.count_params() != 155: - raise Exception("params dont match") + raise Exception("params do not match") # exit() ## 3D @@ -116,14 +116,14 @@ print(n) shape = n.outputs.get_shape().as_list() if (shape[1] != 50) or (shape[2] != 50) or (shape[3] != 50) or (shape[4] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") # n = tl.layers.DeConv3dLayer(nin, shape=(2, 2, 2, 128, 3), output_shape=(100, 12, 32, 32, 128), strides=(1, 2, 2, 2, 1)) # print(n) # shape = n.outputs.get_shape().as_list() -n = tl.layers.DeConv3d(nin, 32, (3, 3, 3), (2, 2, 2)) +n = tl.layers.DeConv3d(nin, n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2)) shape = n.outputs.get_shape().as_list() print(shape) if (shape[1] != 200) or (shape[2] != 200) or (shape[3] != 200) or (shape[4] != 32): - raise 
Exception("shape dont match") + raise Exception("shape do not match") diff --git a/tests/test_layers_core.py b/tests/test_layers_core.py index e0410c0f6..53f0d8a65 100644 --- a/tests/test_layers_core.py +++ b/tests/test_layers_core.py @@ -4,23 +4,23 @@ ## DenseLayer x = tf.placeholder(tf.float32, shape=[None, 30]) net = tl.layers.InputLayer(x, name='input') -net = tl.layers.DenseLayer(net, 10, name='dense') +net = tl.layers.DenseLayer(net, n_units=10, name='dense') net.print_layers() net.print_params(False) shape = net.outputs.get_shape().as_list() if shape[-1] != 10: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 1: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 2: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 310: - raise Exception("params dont match") + raise Exception("params do not match") ## OneHotInputLayer x = tf.placeholder(tf.int32, shape=[None]) @@ -32,16 +32,16 @@ shape = net.outputs.get_shape().as_list() if shape[-1] != 8: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 0: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 0: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 0: - raise Exception("params dont match") + raise Exception("params do not match") ## Word2vecEmbeddingInputlayer batch_size = 8 @@ -57,16 +57,16 @@ shape = net.outputs.get_shape().as_list() if shape != [8, 200]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 1: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 3: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 401000: - raise Exception("params dont match") + raise Exception("params do not match") ## EmbeddingInputlayer batch_size = 8 @@ -78,16 +78,16 @@ shape = net.outputs.get_shape().as_list() if shape != [batch_size, 50]: # (8, 50) - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 1: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 1: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 50000: - raise Exception("params dont match") + raise Exception("params do not match") ## AverageEmbeddingInputlayer batch_size = 8 @@ -100,16 +100,16 @@ shape = net.outputs.get_shape().as_list() if shape != [batch_size, 50]: # (8, 50) - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 1: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 1: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 50000: - raise Exception("params dont match") + raise Exception("params do not match") ## ReconLayer x = tf.placeholder(tf.float32, shape=(None, 784)) @@ -126,16 +126,16 @@ shape = net.outputs.get_shape().as_list() if shape[-1] != 784: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 2: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 4: - raise 
Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 308308: - raise Exception("params dont match") + raise Exception("params do not match") ## GaussianNoiseLayer x = tf.placeholder(tf.float32, shape=(64, 784)) @@ -148,16 +148,16 @@ shape = net.outputs.get_shape().as_list() if shape != [64, 100]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 2: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 2: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 78500: - raise Exception("params dont match") + raise Exception("params do not match") ## DropconnectDenseLayer x = tf.placeholder(tf.float32, shape=(64, 784)) @@ -170,13 +170,13 @@ shape = net.outputs.get_shape().as_list() if shape != [64, 100]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 2: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 4: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 88600: - raise Exception("params dont match") + raise Exception("params do not match") diff --git a/tests/test_layers_extend.py b/tests/test_layers_extend.py index d7469f5dc..56f512e37 100644 --- a/tests/test_layers_extend.py +++ b/tests/test_layers_extend.py @@ -4,25 +4,25 @@ ## 1D x = tf.placeholder(tf.float32, (None, 100)) n = tl.layers.InputLayer(x, name='in') -n = tl.layers.DenseLayer(n, 100, name='d1') -n = tl.layers.DenseLayer(n, 100, name='d2') +n = tl.layers.DenseLayer(n, n_units=100, name='d1') +n = tl.layers.DenseLayer(n, n_units=100, name='d2') -n = tl.layers.ExpandDimsLayer(n, 2) +n = tl.layers.ExpandDimsLayer(n, axis=2) print(n) shape = n.outputs.get_shape().as_list() if shape[-1] != 1: - raise Exception("shape dont match") + raise Exception("shape do not match") -n = tl.layers.TileLayer(n, [-1, 1, 3]) +n = tl.layers.TileLayer(n, multiples=[-1, 1, 3]) print(n) shape = n.outputs.get_shape().as_list() if shape[-1] != 3: - raise Exception("shape dont match") + raise Exception("shape do not match") n.print_layers() n.print_params(False) # print(n.all_layers, n.all_params) if len(n.all_layers) != 4: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(n.all_params) != 4: - raise Exception("params dont match") + raise Exception("params do not match") diff --git a/tests/test_layers_flow_control.py b/tests/test_layers_flow_control.py index 8d367367c..41e5c2a81 100644 --- a/tests/test_layers_flow_control.py +++ b/tests/test_layers_flow_control.py @@ -27,10 +27,10 @@ network.print_params(False) if len(network.all_params) != 12: - raise Exception("params dont match") + raise Exception("params do not match") if len(network.all_layers) != 13: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(network.all_drop) != 5: - raise Exception("drop dont match") + raise Exception("drop do not match") diff --git a/tests/test_layers_importer.py b/tests/test_layers_importer.py index c869fe34e..44330a186 100644 --- a/tests/test_layers_importer.py +++ b/tests/test_layers_importer.py @@ -36,7 +36,7 @@ def keras_block(x): # logits, end_points = inception_v3(X, num_classes=1001, # is_training=False) network = tl.layers.SlimNetsLayer( - prev_layer=net_in, + net_in, slim_layer=inception_v3, slim_args={ 'num_classes': 1001, diff 
--git a/tests/test_layers_merge.py b/tests/test_layers_merge.py index 236ca0b33..c85688191 100644 --- a/tests/test_layers_merge.py +++ b/tests/test_layers_merge.py @@ -6,21 +6,21 @@ ## vector x = tf.placeholder(tf.float32, shape=[None, 784]) inputs = tl.layers.InputLayer(x, name='input_layer') -net1 = tl.layers.DenseLayer(inputs, 100, act=tf.nn.relu, name='relu1_1') -net2 = tl.layers.DenseLayer(inputs, 100, act=tf.nn.relu, name='relu2_1') -net = tl.layers.ConcatLayer([net1, net2], 1, name='concat_layer') +net1 = tl.layers.DenseLayer(inputs, n_units=100, act=tf.nn.relu, name='relu1_1') +net2 = tl.layers.DenseLayer(inputs, n_units=100, act=tf.nn.relu, name='relu2_1') +net = tl.layers.ConcatLayer([net1, net2], concat_dim=1, name='concat_layer') net.print_params(False) net.print_layers() if len(net.all_layers) != 3: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 4: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 157000: - raise Exception("params dont match") + raise Exception("params do not match") net_0 = tl.layers.DenseLayer(inputs, n_units=100, act=tf.nn.relu, name='net_0') net_1 = tl.layers.DenseLayer(inputs, n_units=100, act=tf.nn.relu, name='net_1') @@ -30,36 +30,36 @@ net.print_layers() if len(net.all_layers) != 3: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 4: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 157000: - raise Exception("params dont match") + raise Exception("params do not match") ## image x = tf.placeholder(tf.float32, shape=[None, 100, 100, 3]) inputs = tl.layers.InputLayer(x, name='input') -net1 = tl.layers.Conv2d(inputs, 32, (3, 3), (2, 2), act=tf.nn.relu, name='c1') -net2 = tl.layers.Conv2d(inputs, 32, (3, 3), (2, 2), act=tf.nn.relu, name='c2') -net = tl.layers.ConcatLayer([net1, net2], -1, name='concat') +net1 = tl.layers.Conv2d(inputs, n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, name='c1') +net2 = tl.layers.Conv2d(inputs, n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, name='c2') +net = tl.layers.ConcatLayer([net1, net2], concat_dim=-1, name='concat') net.print_params(False) net.print_layers() shape = net.outputs.get_shape().as_list() if shape[1:] != [50, 50, 64]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 3: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 4: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 1792: - raise Exception("params dont match") + raise Exception("params do not match") net = tl.layers.ElementwiseLayer([net1, net2], combine_fn=tf.minimum, name='minimum2') net.print_params(False) @@ -67,13 +67,13 @@ shape = net.outputs.get_shape().as_list() if shape[1:] != [50, 50, 32]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 3: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 4: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 1792: - raise Exception("params dont match") + raise Exception("params do not match") diff --git a/tests/test_layers_normalization.py b/tests/test_layers_normalization.py index 839acd7d2..38480a1f5 100644 --- 
a/tests/test_layers_normalization.py +++ b/tests/test_layers_normalization.py @@ -5,9 +5,9 @@ def model(x, is_train=True, reuse=False): with tf.variable_scope("model", reuse=reuse): n = tl.layers.InputLayer(x, name='in') - n = tl.layers.Conv2d(n, 80, name='conv2d_1') + n = tl.layers.Conv2d(n, n_filter=80, name='conv2d_1') n = tl.layers.BatchNormLayer(n, name='norm_batch') - n = tl.layers.Conv2d(n, 80, name='conv2d_2') + n = tl.layers.Conv2d(n, n_filter=80, name='conv2d_2') n = tl.layers.LocalResponseNormLayer(n, name='norm_local') n = tl.layers.LayerNormLayer(n, name='norm_layer') n = tl.layers.InstanceNormLayer(n, name='norm_instance') @@ -22,10 +22,10 @@ def model(x, is_train=True, reuse=False): net.print_params(False) if len(net.all_layers) != 6: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 12: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 60560: - raise Exception("params dont match") + raise Exception("params do not match") diff --git a/tests/test_layers_padding.py b/tests/test_layers_padding.py index a0c859436..8d3d7f065 100644 --- a/tests/test_layers_padding.py +++ b/tests/test_layers_padding.py @@ -1,59 +1,59 @@ import tensorflow as tf -from tensorlayer.layers import ZeroPad1d, ZeroPad2d, ZeroPad3d, InputLayer +import tensorlayer as tl ## 1D x = tf.placeholder(tf.float32, (None, 100, 1)) -n = InputLayer(x) -n1 = ZeroPad1d(n, padding=1) +n = tl.layers.InputLayer(x) +n1 = tl.layers.ZeroPad1d(n, padding=1) n1.print_layers() shape = n1.outputs.get_shape().as_list() if shape[1:] != [102, 1]: - raise Exception("shape dont match") + raise Exception("shape do not match") -n2 = ZeroPad1d(n, padding=(2, 3)) +n2 = tl.layers.ZeroPad1d(n, padding=(2, 3)) n2.print_layers() shape = n2.outputs.get_shape().as_list() if shape[1:] != [105, 1]: - raise Exception("shape dont match") + raise Exception("shape do not match") ## 2D x = tf.placeholder(tf.float32, (None, 100, 100, 3)) -n = InputLayer(x) -n1 = ZeroPad2d(n, padding=2) +n = tl.layers.InputLayer(x) +n1 = tl.layers.ZeroPad2d(n, padding=2) n1.print_layers() shape = n1.outputs.get_shape().as_list() if shape[1:] != [104, 104, 3]: - raise Exception("shape dont match") + raise Exception("shape do not match") -n2 = ZeroPad2d(n, padding=(2, 3)) +n2 = tl.layers.ZeroPad2d(n, padding=(2, 3)) n2.print_layers() shape = n2.outputs.get_shape().as_list() if shape[1:] != [104, 106, 3]: - raise Exception("shape dont match") + raise Exception("shape do not match") -n3 = ZeroPad2d(n, padding=((3, 3), (4, 4))) +n3 = tl.layers.ZeroPad2d(n, padding=((3, 3), (4, 4))) n3.print_layers() shape = n3.outputs.get_shape().as_list() if shape[1:] != [106, 108, 3]: - raise Exception("shape dont match") + raise Exception("shape do not match") ## 3D x = tf.placeholder(tf.float32, (None, 100, 100, 100, 3)) -n = InputLayer(x) -n1 = ZeroPad3d(n, padding=2) +n = tl.layers.InputLayer(x) +n1 = tl.layers.ZeroPad3d(n, padding=2) n1.print_layers() shape = n1.outputs.get_shape().as_list() if shape[1:] != [104, 104, 104, 3]: - raise Exception("shape dont match") + raise Exception("shape do not match") -n2 = ZeroPad3d(n, padding=(2, 3, 4)) +n2 = tl.layers.ZeroPad3d(n, padding=(2, 3, 4)) n2.print_layers() shape = n2.outputs.get_shape().as_list() if shape[1:] != [104, 106, 108, 3]: - raise Exception("shape dont match") + raise Exception("shape do not match") -n3 = ZeroPad3d(n, padding=((3, 3), (4, 4), (5, 5))) +n3 = tl.layers.ZeroPad3d(n, padding=((3, 3), (4, 4), (5, 5))) 
n3.print_layers() shape = n3.outputs.get_shape().as_list() if shape[1:] != [106, 108, 110, 3]: - raise Exception("shape dont match") + raise Exception("shape do not match") diff --git a/tests/test_layers_pooling.py b/tests/test_layers_pooling.py index 6086c568d..b94cee8f5 100644 --- a/tests/test_layers_pooling.py +++ b/tests/test_layers_pooling.py @@ -4,89 +4,89 @@ ## 1D ======================================================================== x = tf.placeholder(tf.float32, (None, 100, 1)) nin = tl.layers.InputLayer(x, name='in1') -nin = tl.layers.Conv1d(nin, 32, 5, 2, name='conv1d') +nin = tl.layers.Conv1d(nin, n_filter=32, filter_size=5, stride=2, name='conv1d') print(nin) shape = nin.outputs.get_shape().as_list() if (shape[1] != 50) or (shape[2] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") -n = tl.layers.MaxPool1d(nin, 3, 2, 'same', name='maxpool1d') +n = tl.layers.MaxPool1d(nin, filter_size=3, strides=2, padding='same', name='maxpool1d') print(n) shape = n.outputs.get_shape().as_list() # print(shape[1:3]) if shape[1:3] != [25, 32]: - raise Exception("shape dont match") + raise Exception("shape do not match") -n = tl.layers.MeanPool1d(nin, 3, 2, 'same', name='meanpool1d') +n = tl.layers.MeanPool1d(nin, filter_size=3, strides=2, padding='same', name='meanpool1d') print(n) shape = n.outputs.get_shape().as_list() if shape[1:3] != [25, 32]: - raise Exception("shape dont match") + raise Exception("shape do not match") n = tl.layers.GlobalMaxPool1d(nin, name='maxpool1d') print(n) shape = n.outputs.get_shape().as_list() if shape[-1] != 32: - raise Exception("shape dont match") + raise Exception("shape do not match") n = tl.layers.GlobalMeanPool1d(nin, name='meanpool1d') print(n) shape = n.outputs.get_shape().as_list() if shape[-1] != 32: - raise Exception("shape dont match") + raise Exception("shape do not match") ## 2D ======================================================================== x = tf.placeholder(tf.float32, (None, 100, 100, 3)) nin = tl.layers.InputLayer(x, name='in2') -nin = tl.layers.Conv2d(nin, 32, (3, 3), (2, 2), name='conv2d') +nin = tl.layers.Conv2d(nin, n_filter=32, filter_size=(3, 3), strides=(2, 2), name='conv2d') print(nin) shape = nin.outputs.get_shape().as_list() if (shape[1] != 50) or (shape[2] != 50) or (shape[3] != 32): - raise Exception("shape dont match") + raise Exception("shape do not match") -n = tl.layers.MaxPool2d(nin, (3, 3), (2, 2), 'SAME', name='maxpool2d') +n = tl.layers.MaxPool2d(nin, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='maxpool2d') print(n) shape = n.outputs.get_shape().as_list() # print(shape[1:3]) if shape[1:4] != [25, 25, 32]: - raise Exception("shape dont match") + raise Exception("shape do not match") -n = tl.layers.MeanPool2d(nin, (3, 3), (2, 2), 'SAME', name='meanpool2d') +n = tl.layers.MeanPool2d(nin, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='meanpool2d') print(n) shape = n.outputs.get_shape().as_list() if shape[1:4] != [25, 25, 32]: - raise Exception("shape dont match") + raise Exception("shape do not match") n = tl.layers.GlobalMaxPool2d(nin, name='maxpool2d') print(n) shape = n.outputs.get_shape().as_list() if shape[-1] != 32: - raise Exception("shape dont match") + raise Exception("shape do not match") n = tl.layers.GlobalMeanPool2d(nin, name='meanpool2d') print(n) shape = n.outputs.get_shape().as_list() if shape[-1] != 32: - raise Exception("shape dont match") + raise Exception("shape do not match") ## 3D 
======================================================================== x = tf.placeholder(tf.float32, (None, 100, 100, 100, 3)) nin = tl.layers.InputLayer(x, name='in') -n = tl.layers.MeanPool3d(nin, (3, 3, 3), (2, 2, 2), 'SAME', name='meanpool3d') +n = tl.layers.MeanPool3d(nin, filter_size=(3, 3, 3), strides=(2, 2, 2), padding='SAME', name='meanpool3d') print(n) shape = n.outputs.get_shape().as_list() if shape != [None, 50, 50, 50, 3]: - raise Exception("shape dont match") + raise Exception("shape do not match") n = tl.layers.GlobalMaxPool3d(nin) print(n) shape = n.outputs.get_shape().as_list() if shape != [None, 3]: - raise Exception("shape dont match") + raise Exception("shape do not match") n = tl.layers.GlobalMeanPool3d(nin) print(n) shape = n.outputs.get_shape().as_list() if shape != [None, 3]: - raise Exception("shape dont match") + raise Exception("shape do not match") diff --git a/tests/test_layers_recurrent.py b/tests/test_layers_recurrent.py index 2a9726e9e..c416d19a6 100644 --- a/tests/test_layers_recurrent.py +++ b/tests/test_layers_recurrent.py @@ -23,40 +23,41 @@ net.print_params(False) if len(net.all_layers) != 7: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 7: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 7790: - raise Exception("params dont match") + raise Exception("params do not match") ## CNN+RNN encoder ==================================================== image_size = 100 batch_size = 10 num_steps = 5 + x = tf.placeholder(tf.float32, shape=[batch_size, image_size, image_size, 1]) net = tl.layers.InputLayer(x, name='in') -net = tl.layers.Conv2d(net, 32, (5, 5), (2, 2), tf.nn.relu, name='cnn1') -net = tl.layers.MaxPool2d(net, (2, 2), (2, 2), name='pool1') -net = tl.layers.Conv2d(net, 10, (5, 5), (2, 2), tf.nn.relu, name='cnn2') -net = tl.layers.MaxPool2d(net, (2, 2), (2, 2), name='pool2') +net = tl.layers.Conv2d(net, n_filter=32, filter_size=(5, 5), strides=(2, 2), act=tf.nn.relu, name='cnn1') +net = tl.layers.MaxPool2d(net, filter_size=(2, 2), strides=(2, 2), name='pool1') +net = tl.layers.Conv2d(net, n_filter=10, filter_size=(5, 5), strides=(2, 2), act=tf.nn.relu, name='cnn2') +net = tl.layers.MaxPool2d(net, filter_size=(2, 2), strides=(2, 2), name='pool2') net = tl.layers.FlattenLayer(net, name='flatten') -net = tl.layers.ReshapeLayer(net, shape=[-1, num_steps, int(net.outputs._shape[-1])]) +net = tl.layers.ReshapeLayer(net, shape=(-1, num_steps, int(net.outputs._shape[-1]))) rnn = tl.layers.RNNLayer(net, cell_fn=tf.contrib.rnn.BasicLSTMCell, n_hidden=200, n_steps=num_steps, return_last=False, return_seq_2d=True, name='rnn') -net = tl.layers.DenseLayer(rnn, 3, name='out') +net = tl.layers.DenseLayer(rnn, n_units=3, name='out') net.print_layers() net.print_params(False) if len(net.all_layers) != 8: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 8: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 562245: - raise Exception("params dont match") + raise Exception("params do not match") ## Bidirectional Synced input and output batch_size = 10 @@ -73,16 +74,16 @@ shape = net.outputs.get_shape().as_list() if shape[1:3] != [num_steps, hidden_size * 2]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 2: - raise Exception("layers dont match") + raise Exception("layers do not match") if 
len(net.all_params) != 5: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 7160: - raise Exception("params dont match") + raise Exception("params do not match") # n_layer=2 net = tl.layers.EmbeddingInputlayer(inputs=input_data, vocabulary_size=vocab_size, embedding_size=hidden_size, name='emb2') @@ -94,16 +95,16 @@ shape = net.outputs.get_shape().as_list() if shape[1:3] != [num_steps, hidden_size * 2]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 2: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 9: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 13720: - raise Exception("params dont match") + raise Exception("params do not match") ## ConvLSTMLayer TODO # image_size = 100 @@ -147,20 +148,20 @@ shape = rnn.outputs.get_shape().as_list() if shape[-1] != embedding_size: - raise Exception("shape dont match") + raise Exception("shape do not match") shape = net.outputs.get_shape().as_list() if shape[-1] != vocab_size: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 3: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 5: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 4510: - raise Exception("params dont match") + raise Exception("params do not match") # n_layer=3 nin = tl.layers.EmbeddingInputlayer(inputs=input_seqs, vocabulary_size=vocab_size, embedding_size=embedding_size, name='seq_embedding2') @@ -178,7 +179,7 @@ shape = rnn.outputs.get_shape().as_list() if (shape[-1] != embedding_size) or (len(shape) != 2): - raise Exception("shape dont match") + raise Exception("shape do not match") net = tl.layers.DynamicRNNLayer( nin, @@ -197,7 +198,7 @@ shape = net.outputs.get_shape().as_list() if (shape[-1] != embedding_size) or (len(shape) != 3): - raise Exception("shape dont match") + raise Exception("shape do not match") net = tl.layers.DynamicRNNLayer( nin, @@ -213,7 +214,7 @@ net.print_params(False) shape = net.outputs.get_shape().as_list() if (shape[-1] != embedding_size) or (len(shape) != 2): - raise Exception("shape dont match") + raise Exception("shape do not match") net = tl.layers.DynamicRNNLayer( nin, @@ -229,7 +230,7 @@ net.print_params(False) shape = net.outputs.get_shape().as_list() if (shape[-1] != embedding_size) or (len(shape) != 2): - raise Exception("shape dont match") + raise Exception("shape do not match") ## BiDynamic Synced input and output rnn = tl.layers.BiDynamicRNNLayer( @@ -248,20 +249,20 @@ shape = rnn.outputs.get_shape().as_list() if shape[-1] != embedding_size * 2: - raise Exception("shape dont match") + raise Exception("shape do not match") shape = net.outputs.get_shape().as_list() if shape[-1] != vocab_size: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 3: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 7: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 8390: - raise Exception("params dont match") + raise Exception("params do not match") # n_layer=2 rnn = tl.layers.BiDynamicRNNLayer( @@ -281,20 +282,20 @@ shape = rnn.outputs.get_shape().as_list() if shape[-1] != embedding_size * 2: - raise Exception("shape 
dont match") + raise Exception("shape do not match") shape = net.outputs.get_shape().as_list() if shape[-1] != vocab_size: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 3: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 11: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 18150: - raise Exception("params dont match") + raise Exception("params do not match") ## Seq2Seq from tensorlayer.layers import EmbeddingInputlayer, Seq2Seq, retrieve_seq_length_op2, DenseLayer @@ -333,13 +334,13 @@ shape = net.outputs.get_shape().as_list() if shape[-1] != 10000: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 5: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 11: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 5293200: - raise Exception("params dont match") + raise Exception("params do not match") diff --git a/tests/test_layers_shape.py b/tests/test_layers_shape.py index 585fbf012..09efe5b80 100644 --- a/tests/test_layers_shape.py +++ b/tests/test_layers_shape.py @@ -12,35 +12,35 @@ shape = net.outputs.get_shape().as_list() if shape[-1] != 784: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 1: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 0: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 0: - raise Exception("params dont match") + raise Exception("params do not match") ## Reshape -net = tl.layers.ReshapeLayer(net, [-1, 28, 28, 1], name='reshape') +net = tl.layers.ReshapeLayer(net, shape=(-1, 28, 28, 1), name='reshape') net.print_layers() net.print_params(False) shape = net.outputs.get_shape().as_list() if shape[1:] != [28, 28, 1]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 2: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 0: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 0: - raise Exception("params dont match") + raise Exception("params do not match") ## TransposeLayer net = tl.layers.TransposeLayer(net, perm=[0, 1, 3, 2], name='trans') @@ -50,13 +50,13 @@ shape = net.outputs.get_shape().as_list() if shape[1:] != [28, 1, 28]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 3: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 0: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 0: - raise Exception("params dont match") + raise Exception("params do not match") diff --git a/tests/test_layers_spatial_transformer.py b/tests/test_layers_spatial_transformer.py index af9021649..6935528a5 100644 --- a/tests/test_layers_spatial_transformer.py +++ b/tests/test_layers_spatial_transformer.py @@ -1,29 +1,29 @@ import tensorflow as tf -from tensorlayer.layers import InputLayer, FlattenLayer, DenseLayer, DropoutLayer, SpatialTransformer2dAffineLayer, Conv2d +import tensorlayer as tl x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1]) def model(x, is_train, reuse): 
with tf.variable_scope("STN", reuse=reuse): - nin = InputLayer(x, name='in') + nin = tl.layers.InputLayer(x, name='in') ## 1. Localisation network # use MLP as the localisation net - nt = FlattenLayer(nin, name='flatten') - nt = DenseLayer(nt, n_units=20, act=tf.nn.tanh, name='dense1') - nt = DropoutLayer(nt, 0.8, True, is_train, name='drop1') + nt = tl.layers.FlattenLayer(nin, name='flatten') + nt = tl.layers.DenseLayer(nt, n_units=20, act=tf.nn.tanh, name='dense1') + nt = tl.layers.DropoutLayer(nt, keep=0.8, is_fix=True, is_train=is_train, name='drop1') # you can also use CNN instead for MLP as the localisation net # nt = Conv2d(nin, 16, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', name='tc1') # nt = Conv2d(nt, 8, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', name='tc2') ## 2. Spatial transformer module (sampler) - n = SpatialTransformer2dAffineLayer(nin, nt, out_size=[40, 40], name='spatial') + n = tl.layers.SpatialTransformer2dAffineLayer(nin, theta_layer=nt, out_size=[40, 40], name='spatial') s = n ## 3. Classifier - n = Conv2d(n, 16, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', name='conv1') - n = Conv2d(n, 16, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', name='conv2') - n = FlattenLayer(n, name='flatten2') - n = DenseLayer(n, n_units=1024, act=tf.nn.relu, name='out1') - n = DenseLayer(n, n_units=10, act=tf.identity, name='out2') + n = tl.layers.Conv2d(n, n_filter=16, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, padding='SAME', name='conv1') + n = tl.layers.Conv2d(n, n_filter=16, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, padding='SAME', name='conv2') + n = tl.layers.FlattenLayer(n, name='flatten2') + n = tl.layers.DenseLayer(n, n_units=1024, act=tf.nn.relu, name='out1') + n = tl.layers.DenseLayer(n, n_units=10, act=tf.identity, name='out2') return n, s @@ -35,13 +35,13 @@ def model(x, is_train, reuse): shape = s.outputs.get_shape().as_list() if shape[1:] != [40, 40, 1]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 9: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 12: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 1667980: - raise Exception("params dont match") + raise Exception("params do not match") diff --git a/tests/test_layers_special_activation.py b/tests/test_layers_special_activation.py index 137ee45ea..c50591eea 100644 --- a/tests/test_layers_special_activation.py +++ b/tests/test_layers_special_activation.py @@ -3,7 +3,7 @@ x = tf.placeholder(tf.float32, shape=[None, 30]) net = tl.layers.InputLayer(x, name='input') -net = tl.layers.DenseLayer(net, 10, name='dense') +net = tl.layers.DenseLayer(net, n_units=10, name='dense') net = tl.layers.PReluLayer(net, name='prelu') net.print_layers() @@ -11,16 +11,16 @@ shape = net.outputs.get_shape().as_list() if shape[-1] != 10: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 2: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 3: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 320: - raise Exception("params dont match") + raise Exception("params do not match") net = tl.layers.PReluLayer(net, channel_shared=True, name='prelu2') @@ -29,13 +29,13 @@ shape = net.outputs.get_shape().as_list() if shape[-1] != 10: - raise Exception("shape dont match") + raise Exception("shape 
do not match") if len(net.all_layers) != 3: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 4: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 321: - raise Exception("params dont match") + raise Exception("params do not match") diff --git a/tests/test_layers_stack.py b/tests/test_layers_stack.py index db60318fe..b473c00d2 100644 --- a/tests/test_layers_stack.py +++ b/tests/test_layers_stack.py @@ -3,9 +3,9 @@ x = tf.placeholder(tf.float32, shape=[None, 30]) net = tl.layers.InputLayer(x, name='input') -net1 = tl.layers.DenseLayer(net, 10, name='dense1') -net2 = tl.layers.DenseLayer(net, 10, name='dense2') -net3 = tl.layers.DenseLayer(net, 10, name='dense3') +net1 = tl.layers.DenseLayer(net, n_units=10, name='dense1') +net2 = tl.layers.DenseLayer(net, n_units=10, name='dense2') +net3 = tl.layers.DenseLayer(net, n_units=10, name='dense3') net = tl.layers.StackLayer([net1, net2, net3], axis=1, name='stack') net.print_layers() @@ -13,32 +13,32 @@ shape = net.outputs.get_shape().as_list() if shape[-1] != 10: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(net.all_layers) != 4: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(net.all_params) != 6: - raise Exception("params dont match") + raise Exception("params do not match") if net.count_params() != 930: - raise Exception("params dont match") + raise Exception("params do not match") net = tl.layers.UnStackLayer(net, axis=1, name='unstack') for n in net: print(n, n.outputs) shape = n.outputs.get_shape().as_list() if shape[-1] != 10: - raise Exception("shape dont match") + raise Exception("shape do not match") # n.print_layers() # n.print_params(False) if len(n.all_layers) != 4: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(n.all_params) != 6: - raise Exception("params dont match") + raise Exception("params do not match") if n.count_params() != 930: - raise Exception("params dont match") + raise Exception("params do not match") diff --git a/tests/test_layers_super_resolution.py b/tests/test_layers_super_resolution.py index 9b6a1d48e..4850d66ff 100644 --- a/tests/test_layers_super_resolution.py +++ b/tests/test_layers_super_resolution.py @@ -1,11 +1,11 @@ import tensorflow as tf -from tensorlayer.layers import SubpixelConv1d, SubpixelConv2d, InputLayer, Conv1d, Conv2d +import tensorlayer as tl ## 1D t_signal = tf.placeholder('float32', [10, 100, 4], name='x') -n = InputLayer(t_signal, name='in') -n = Conv1d(n, 32, 3, 1, padding='SAME', name='conv1d') -n = SubpixelConv1d(n, scale=2, name='subpixel') +n = tl.layers.InputLayer(t_signal, name='in') +n = tl.layers.Conv1d(n, n_filter=32, filter_size=3, stride=1, padding='SAME', name='conv1d') +n = tl.layers.SubpixelConv1d(n, scale=2, name='subpixel') print(n.outputs.shape) # ... 
(10, 200, 2) n.print_layers() @@ -13,22 +13,22 @@ shape = n.outputs.get_shape().as_list() if shape != [10, 200, 16]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(n.all_layers) != 2: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(n.all_params) != 2: - raise Exception("params dont match") + raise Exception("params do not match") if n.count_params() != 416: - raise Exception("params dont match") + raise Exception("params do not match") ## 2D x = tf.placeholder('float32', [10, 100, 100, 3], name='x') -n = InputLayer(x, name='in') -n = Conv2d(n, 32, (3, 2), (1, 1), padding='SAME', name='conv2d') -n = SubpixelConv2d(n, scale=2, name='subpixel2d') +n = tl.layers.InputLayer(x, name='in') +n = tl.layers.Conv2d(n, n_filter=32, filter_size=(3, 2), strides=(1, 1), padding='SAME', name='conv2d') +n = tl.layers.SubpixelConv2d(n, scale=2, name='subpixel2d') print(n.outputs.shape) n.print_layers() @@ -36,13 +36,13 @@ shape = n.outputs.get_shape().as_list() if shape != [10, 200, 200, 8]: - raise Exception("shape dont match") + raise Exception("shape do not match") if len(n.all_layers) != 2: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(n.all_params) != 2: - raise Exception("params dont match") + raise Exception("params do not match") if n.count_params() != 608: - raise Exception("params dont match") + raise Exception("params do not match") diff --git a/tests/test_layers_time_distributed.py b/tests/test_layers_time_distributed.py index ed805958d..7425c2f8e 100644 --- a/tests/test_layers_time_distributed.py +++ b/tests/test_layers_time_distributed.py @@ -12,11 +12,11 @@ net = TimeDistributedLayer(net, layer_class=DenseLayer, args={'n_units': 50, 'name': 'dense'}, name='time_dense') if net.outputs.get_shape().as_list() != [32, 20, 50]: - raise Exception("shape dont match") + raise Exception("shape do not match") # ... (32, 20, 50) net.print_params(False) if net.count_params() != 5050: - raise Exception("params dont match") + raise Exception("params do not match") ## reuse diff --git a/tests/test_mnist_simple.py b/tests/test_mnist_simple.py index 84acd4b88..980053ef6 100644 --- a/tests/test_mnist_simple.py +++ b/tests/test_mnist_simple.py @@ -17,9 +17,9 @@ # define the network network = tl.layers.InputLayer(x, name='input') network = tl.layers.DropoutLayer(network, keep=0.8, name='drop1') -network = tl.layers.DenseLayer(network, 100, tf.nn.relu, name='relu1') +network = tl.layers.DenseLayer(network, n_units=100, act=tf.nn.relu, name='relu1') network = tl.layers.DropoutLayer(network, keep=0.8, name='drop2') -network = tl.layers.DenseLayer(network, 100, tf.nn.relu, name='relu2') +network = tl.layers.DenseLayer(network, n_units=100, act=tf.nn.relu, name='relu2') network = tl.layers.DropoutLayer(network, keep=0.8, name='drop3') # the softmax is implemented internally in tl.cost.cross_entropy(y, y_) to # speed up computation, so we use identity here. 
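Note on the test updates above: tests/test_mnist_simple.py (like the other test files in this patch) is rewritten to pass explicit keyword arguments instead of relying on positional order. A minimal sketch of the resulting call style, assuming the TensorLayer 1.x graph-building API exercised by these tests:

import tensorflow as tf
import tensorlayer as tl

x = tf.placeholder(tf.float32, shape=(None, 784))
net = tl.layers.InputLayer(x, name='input')
net = tl.layers.DropoutLayer(net, keep=0.8, name='drop1')
# explicit keywords (n_units=, act=) make the intent clear and keep the
# call valid even if the positional order of the constructor changes
net = tl.layers.DenseLayer(net, n_units=100, act=tf.nn.relu, name='relu1')
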
diff --git a/tests/test_models.py b/tests/test_models.py index e8bbe9663..7800a918d 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -14,10 +14,10 @@ # use for inferencing probs = tf.nn.softmax(vgg.outputs) if len(vgg.all_layers) != 22: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(vgg.all_params) != 32: - raise Exception("params dont match") + raise Exception("params do not match") with tf.Graph().as_default() as graph: # - Extract features with VGG16 and Train a classifier with 100 classes @@ -26,13 +26,13 @@ vgg = tl.models.VGG16(x, end_with='fc2_relu') if len(vgg.all_layers) != 21: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(vgg.all_params) != 30: - raise Exception("params dont match") + raise Exception("params do not match") # add one more layer - net = tl.layers.DenseLayer(vgg, 100, name='out') + net = tl.layers.DenseLayer(vgg, n_units=100, name='out') # initialize all parameters # sess = tf.InteractiveSession() # tl.layers.initialize_global_variables(sess) @@ -41,7 +41,7 @@ # train your own classifier (only update the last layer) train_params = tl.layers.get_variables_with_name('out') if len(train_params) != 2: - raise Exception("params dont match") + raise Exception("params do not match") with tf.Graph().as_default() as graph: # - Reuse model @@ -56,7 +56,7 @@ # vgg1.restore_params(sess) if len(vgg1.all_layers) != 21: - raise Exception("layers dont match") + raise Exception("layers do not match") if len(vgg1.all_params) != 30: - raise Exception("params dont match") + raise Exception("params do not match")
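
End to end, the tests/test_models.py hunks above exercise the pattern of reusing VGG16 as a feature extractor and training only a newly added output layer. A minimal sketch under the same assumptions as that test; the 224x224 RGB placeholder shape is assumed here, since it is not shown in the hunk:

import tensorflow as tf
import tensorlayer as tl

x = tf.placeholder(tf.float32, [None, 224, 224, 3])  # assumed VGG16 input shape
# build the network only up to fc2_relu and use it for feature extraction
vgg = tl.models.VGG16(x, end_with='fc2_relu')
# stack a fresh 100-class classifier on top of the extracted features
net = tl.layers.DenseLayer(vgg, n_units=100, name='out')
# only the new layer's variables (W and b, hence 2 params) are trained
train_params = tl.layers.get_variables_with_name('out')
assert len(train_params) == 2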