Merged

27 commits
32e92e2
__init__ import error message repositioned at a more appropriate loca…
Apr 10, 2018
90e41d5
gitignore updated with venv environment
Apr 10, 2018
d04a355
Typo fixed in tensorlayer.layers.convolution.py
Apr 10, 2018
e010005
Deprecation warning added for tl.layer.deconv2d with backward compati…
Apr 10, 2018
3a212df
Deprecation warning added for Layer API change: `layer` argument chan…
Apr 10, 2018
b70c154
Merge branch 'master' into deprecation_warning_fixes
DEKHTIARJonathan Apr 10, 2018
6029e08
test layers extend with argument names precised
Apr 10, 2018
1b1254c
tl.layers.core.py forgotten Classes with deprecation
Apr 10, 2018
d4f1c3a
Error fix in deprecation warning tl.layers.spatial_transformer.py
Apr 10, 2018
8730a25
Test refactored and fix with arguments missing added
Apr 10, 2018
831f7f4
ReshapeLayer error fix
Apr 10, 2018
4d8124e
test_layers_normalization argument name missing fixed
Apr 10, 2018
3304c44
error in tl.layers.ReshapeLayer test if shape is not empty fixed
Apr 10, 2018
a65ff54
test_layers_special_activation missing argument name fixed
Apr 10, 2018
6bedf2d
test_layers_stack missing argument name fixed
Apr 10, 2018
4ae2a0b
test_layers_super_resolution missing argument name fixed
Apr 10, 2018
dc88692
test_models missing argument name fixed
Apr 10, 2018
6c02e5b
Formating error fixed
Apr 10, 2018
7ed03d8
Merge branch 'master' into deprecation_warning_fixes
DEKHTIARJonathan Apr 10, 2018
7cda47d
Merge branch 'master' into deprecation_warning_fixes
zsdonghao Apr 11, 2018
ba047d2
Merge branch 'master' into deprecation_warning_fixes
DEKHTIARJonathan Apr 12, 2018
9702860
Decorator for deprecated argument added
Apr 12, 2018
8ea85df
Deprecation API Change - Use of newly implemented decorator. Docstrin…
Apr 12, 2018
c80058e
Codacy Issues Fix
Apr 12, 2018
6f05a43
Unnecessary PR changes removed - PR Cleaned & Refactored
Apr 13, 2018
55b829d
Merge branch 'master' into deprecation_warning_fixes
DEKHTIARJonathan Apr 13, 2018
af97cb1
Merge branch 'master' into deprecation_warning_fixes
zsdonghao Apr 13, 2018
1 change: 1 addition & 0 deletions .gitignore
@@ -10,3 +10,4 @@ dist
 docs/_build
 tensorlayer.egg-info
 tensorlayer/__pacache__
+venv/
2 changes: 1 addition & 1 deletion tensorlayer/__init__.py
@@ -2,9 +2,9 @@
 from __future__ import absolute_import

 try:
-    install_instr = "Please make sure you install a recent enough version of TensorFlow."
     import tensorflow
 except ImportError:
+    install_instr = "Please make sure you install a recent enough version of TensorFlow."
     raise ImportError("__init__.py : Could not import TensorFlow." + install_instr)

 from . import activation
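The net effect of this hunk: the install hint is now assembled only on the failure path, and the try block contains nothing but the import itself. A minimal sketch of the resulting pattern (module body trimmed):

try:
    import tensorflow
except ImportError:
    # Built only when the import actually fails.
    install_instr = "Please make sure you install a recent enough version of TensorFlow."
    raise ImportError("__init__.py : Could not import TensorFlow." + install_instr)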
42 changes: 42 additions & 0 deletions tensorlayer/deprecation.py
@@ -0,0 +1,42 @@
#! /usr/bin/python
# -*- coding: utf-8 -*-

import functools
import warnings

from . import _logging as logging


def deprecated_alias(end_support_version, **aliases):
    def deco(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):

            try:
                func_name = "{}.{}".format(args[0].__class__.__name__, f.__name__)
            except (NameError, IndexError):
                func_name = f.__name__

            rename_kwargs(kwargs, aliases, end_support_version, func_name)

            return f(*args, **kwargs)

        return wrapper

    return deco


def rename_kwargs(kwargs, aliases, end_support_version, func_name):

    for alias, new in aliases.items():

        if alias in kwargs:

            if new in kwargs:
                raise TypeError('{}() received both {} and {}'.format(func_name, alias, new))

            warnings.warn('{}() - {} is deprecated; use {}'.format(func_name, alias, new), DeprecationWarning)
            logging.warning("DeprecationWarning: {}(): "
                            "`{}` argument is deprecated and will be removed in version {}, "
                            "please change for `{}.`".format(func_name, alias, end_support_version, new))
            kwargs[new] = kwargs.pop(alias)
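As a usage sketch (the class below is illustrative, not part of the PR, and it assumes tensorlayer and TensorFlow are importable): passing the deprecated keyword emits a DeprecationWarning plus a log line, and the value is forwarded under the new name; passing both names raises a TypeError.

from tensorlayer.deprecation import deprecated_alias

class ExampleLayer(object):
    # `layer` is the deprecated alias; `prev_layer` is the new argument name.
    @deprecated_alias(layer='prev_layer', end_support_version=1.9)
    def __init__(self, prev_layer=None, name='example'):
        self.prev_layer = prev_layer
        self.name = name

net = ExampleLayer(layer='input_layer')  # warns: `layer` is deprecated; use `prev_layer`
assert net.prev_layer == 'input_layer'

# ExampleLayer(layer='a', prev_layer='b')  # would raise TypeError: received both layer and prev_layer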
99 changes: 58 additions & 41 deletions tensorlayer/layers/binary.py
@@ -4,6 +4,8 @@
 from .. import _logging as logging
 from .core import *

+from ..deprecation import deprecated_alias
+
 __all__ = [
     'BinaryDenseLayer',
     'BinaryConv2d',
@@ -124,6 +126,7 @@ class BinaryDenseLayer(Layer):

     """

+    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
     def __init__(
             self,
             prev_layer,
@@ -136,13 +139,16 @@ def __init__(
             b_init_args=None,
             name='binary_dense',
     ):
+        super(BinaryDenseLayer, self).__init__(prev_layer=prev_layer, name=name)
+        logging.info("BinaryDenseLayer %s: %d %s" % (name, n_units, act.__name__))
+
+        self.inputs = prev_layer.outputs
+
         if W_init_args is None:
             W_init_args = {}
         if b_init_args is None:
             b_init_args = {}

-        Layer.__init__(self, prev_layer=prev_layer, name=name)
-        self.inputs = prev_layer.outputs
         if self.inputs.get_shape().ndims != 2:
             raise Exception("The input dimension must be rank 2, please reshape or flatten it")

@@ -151,7 +157,7 @@ def __init__(

         n_in = int(self.inputs.get_shape()[-1])
         self.n_units = n_units
-        logging.info("BinaryDenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__))
+
         with tf.variable_scope(name):
             W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args)
             # W = tl.act.sign(W) # dont update ...
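Every constructor hunk in this file follows the same two-part refactor seen above: the unbound Layer.__init__(self, ...) call becomes a cooperative super(...).__init__(...), and the logging.info call moves to the top of __init__ (formatted with name rather than self.name). A minimal sketch of why the super form is preferred (class names are illustrative, not from the PR):

class Base(object):
    def __init__(self, name=None):
        self.name = name

class OldStyle(Base):
    def __init__(self, name=None):
        Base.__init__(self, name=name)  # works, but hard-codes the parent class

class NewStyle(Base):
    def __init__(self, name=None):
        # Cooperative: resolves through the MRO, so an inserted mixin's
        # __init__ still runs exactly once under multiple inheritance.
        super(NewStyle, self).__init__(name=name)

print(NewStyle(name='n').name)  # prints: n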
@@ -228,6 +234,7 @@ class BinaryConv2d(Layer):

     """

+    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
     def __init__(
             self,
             prev_layer,
@@ -255,20 +262,20 @@ def __init__(
             # data_format=None,
             name='binary_cnn2d',
     ):
+        super(BinaryConv2d, self).__init__(prev_layer=prev_layer, name=name)
+        logging.info("BinaryConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (name, n_filter, str(filter_size), str(strides), padding,
+                                                                                               act.__name__))
+
+        self.inputs = prev_layer.outputs
+
         if W_init_args is None:
             W_init_args = {}
         if b_init_args is None:
             b_init_args = {}

+        if use_gemm:
+            raise Exception("TODO. The current version use tf.matmul for inferencing.")
+
-        Layer.__init__(self, prev_layer=prev_layer, name=name)
-        self.inputs = prev_layer.outputs
-        if act is None:
-            act = tf.identity
-        logging.info("BinaryConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (self.name, n_filter, str(filter_size), str(strides), padding,
-                                                                                               act.__name__))
-        if use_gemm:
-            raise Exception("TODO. The current version use tf.matmul for inferencing.")

         if len(strides) != 2:
             raise ValueError("len(strides) should be 2.")
@@ -324,6 +331,7 @@ class TernaryDenseLayer(Layer):

     """

+    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
     def __init__(
             self,
             prev_layer,
@@ -336,22 +344,24 @@ def __init__(
             b_init_args=None,
             name='ternary_dense',
     ):
+        super(TernaryDenseLayer, self).__init__(prev_layer=prev_layer, name=name)
+        logging.info("TernaryDenseLayer %s: %d %s" % (name, n_units, act.__name__))
+
+        self.inputs = prev_layer.outputs
+
         if W_init_args is None:
             W_init_args = {}
         if b_init_args is None:
             b_init_args = {}

-        Layer.__init__(self, prev_layer=prev_layer, name=name)
-        self.inputs = prev_layer.outputs
         if self.inputs.get_shape().ndims != 2:
             raise Exception("The input dimension must be rank 2, please reshape or flatten it")

         if use_gemm:
             raise Exception("TODO. The current version use tf.matmul for inferencing.")

         n_in = int(self.inputs.get_shape()[-1])
         self.n_units = n_units
-        logging.info("TernaryDenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__))
+
         with tf.variable_scope(name):
             W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args)
             # W = tl.act.sign(W) # dont update ...
@@ -430,6 +440,7 @@ class TernaryConv2d(Layer):

     """

+    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
     def __init__(
             self,
             prev_layer,
@@ -457,20 +468,18 @@ def __init__(
             # data_format=None,
             name='ternary_cnn2d',
     ):
+        super(TernaryConv2d, self).__init__(prev_layer=prev_layer, name=name)
+        logging.info("TernaryConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (name, n_filter, str(filter_size), str(strides), padding,
+                                                                                               act.__name__))
+
         if W_init_args is None:
             W_init_args = {}
         if b_init_args is None:
             b_init_args = {}

+        if use_gemm:
+            raise Exception("TODO. The current version use tf.matmul for inferencing.")
+
-        Layer.__init__(self, prev_layer=prev_layer, name=name)
-        self.inputs = prev_layer.outputs
-        if act is None:
-            act = tf.identity
-        logging.info("TernaryConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (self.name, n_filter, str(filter_size), str(strides), padding,
-                                                                                               act.__name__))
-        if use_gemm:
-            raise Exception("TODO. The current version use tf.matmul for inferencing.")

         if len(strides) != 2:
             raise ValueError("len(strides) should be 2.")
@@ -508,7 +517,7 @@ class DorefaDenseLayer(Layer):

     Parameters
     ----------
-    layer : :class:`Layer`
+    prev_layer : :class:`Layer`
         Previous layer.
     bitW : int
         The bits of this layer's parameter
@@ -533,6 +542,7 @@ class DorefaDenseLayer(Layer):

     """

+    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
     def __init__(
             self,
             prev_layer,
@@ -547,22 +557,24 @@ def __init__(
             b_init_args=None,
             name='dorefa_dense',
     ):
+        super(DorefaDenseLayer, self).__init__(prev_layer=prev_layer, name=name)
+        logging.info("DorefaDenseLayer %s: %d %s" % (name, n_units, act.__name__))
+
+        self.inputs = prev_layer.outputs
+
         if W_init_args is None:
             W_init_args = {}
         if b_init_args is None:
             b_init_args = {}

-        Layer.__init__(self, prev_layer=prev_layer, name=name)
-        self.inputs = prev_layer.outputs
         if self.inputs.get_shape().ndims != 2:
             raise Exception("The input dimension must be rank 2, please reshape or flatten it")

         if use_gemm:
             raise Exception("TODO. The current version use tf.matmul for inferencing.")

         n_in = int(self.inputs.get_shape()[-1])
         self.n_units = n_units
-        logging.info("DorefaDenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__))
+
         with tf.variable_scope(name):
             W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args)
             # W = tl.act.sign(W) # dont update ...
@@ -596,7 +608,7 @@ class DorefaConv2d(Layer):

     Parameters
     ----------
-    layer : :class:`Layer`
+    prev_layer : :class:`Layer`
         Previous layer.
     bitW : int
         The bits of this layer's parameter
@@ -644,6 +656,7 @@ class DorefaConv2d(Layer):

     """

+    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
     def __init__(
             self,
             prev_layer,
@@ -673,21 +686,22 @@ def __init__(
             # data_format=None,
             name='dorefa_cnn2d',
     ):
+        super(DorefaConv2d, self).__init__(prev_layer=prev_layer, name=name)
+        logging.info("DorefaConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (name, n_filter, str(filter_size), str(strides), padding,
+                                                                                               act.__name__))
+
+        self.inputs = prev_layer.outputs
+
         if W_init_args is None:
             W_init_args = {}
         if b_init_args is None:
             b_init_args = {}
+        if act is None:
+            act = tf.identity

         if use_gemm:
             raise Exception("TODO. The current version use tf.matmul for inferencing.")

-        Layer.__init__(self, prev_layer=prev_layer, name=name)
-        self.inputs = prev_layer.outputs
-        if act is None:
-            act = tf.identity
-        logging.info("DorefaConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (self.name, n_filter, str(filter_size), str(strides), padding,
-                                                                                               act.__name__))

         if len(strides) != 2:
             raise ValueError("len(strides) should be 2.")
         try:
@@ -720,23 +734,25 @@ class SignLayer(Layer):

     Parameters
     ----------
-    layer : :class:`Layer`
+    prev_layer : :class:`Layer`
         Previous layer.
     name : a str
         A unique layer name.

     """

+    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
     def __init__(
             self,
             prev_layer,
             name='sign',
     ):
+        super(SignLayer, self).__init__(prev_layer=prev_layer, name=name)
+
-        Layer.__init__(self, prev_layer=prev_layer, name=name)
         self.inputs = prev_layer.outputs

         logging.info("SignLayer %s" % (self.name))

         with tf.variable_scope(name):
             # self.outputs = tl.act.sign(self.inputs)
             self.outputs = quantize(self.inputs)
@@ -749,7 +765,7 @@ class ScaleLayer(Layer):

     Parameters
     ----------
-    layer : :class:`Layer`
+    prev_layer : :class:`Layer`
         Previous layer.
     init_scale : float
         The initial value for the scale factor.
@@ -758,17 +774,18 @@ class ScaleLayer(Layer):

     """

+    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
     def __init__(
             self,
             prev_layer,
             init_scale=0.05,
             name='scale',
     ):
+        super(ScaleLayer, self).__init__(prev_layer=prev_layer, name=name)
+        logging.info("ScaleLayer %s: init_scale: %f" % (name, init_scale))

-        Layer.__init__(self, prev_layer=prev_layer, name=name)
         self.inputs = prev_layer.outputs

-        logging.info("ScaleLayer %s: init_scale: %f" % (self.name, init_scale))
         with tf.variable_scope(name):
             # scale = tf.get_variable(name='scale_factor', init, trainable=True, )
             scale = tf.get_variable("scale", shape=[1], initializer=tf.constant_initializer(value=init_scale))