Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
<img src="img/tl_transparent_logo.png" width="50%" height="30%"/>
</div>
</a>

[![Codacy Badge](https://api.codacy.com/project/badge/Grade/ca2a29ddcf7445588beff50bee5406d9)](https://app.codacy.com/app/tensorlayer/tensorlayer?utm_source=github.com&utm_medium=referral&utm_content=tensorlayer/tensorlayer&utm_campaign=badger)
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/tensorlayer/Lobby#?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
[![Build Status](https://travis-ci.org/tensorlayer/tensorlayer.svg?branch=master)](https://travis-ci.org/tensorlayer/tensorlayer)
Expand Down
2 changes: 1 addition & 1 deletion docs/modules/layers.rst
Original file line number Diff line number Diff line change
Expand Up @@ -492,7 +492,7 @@ APIs may better for you.

2D Deconvolution
^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autofunction:: DeConv2d
.. autoclass:: DeConv2d

3D Deconvolution
^^^^^^^^^^^^^^^^^^^^^^^^^^
Expand Down
3 changes: 1 addition & 2 deletions example/tutorial_mnist_simple.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,14 +2,13 @@
# -*- coding: utf-8 -*-

import tensorflow as tf

import tensorlayer as tl

sess = tf.InteractiveSession()

# prepare data
X_train, y_train, X_val, y_val, X_test, y_test = \
tl.files.load_mnist_dataset(shape=(-1,784))
tl.files.load_mnist_dataset(shape=(-1, 784))
# define placeholder
x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
y_ = tf.placeholder(tf.int64, shape=[None], name='y_')
Expand Down
154 changes: 72 additions & 82 deletions tensorlayer/layers/convolution.py
Original file line number Diff line number Diff line change
Expand Up @@ -1415,6 +1415,7 @@ def __init__(
strides=(1, 1),
act=tf.identity,
padding='SAME',
dilation_rate=(1, 1),
W_init=tf.truncated_normal_initializer(stddev=0.02),
b_init=tf.constant_initializer(value=0.0),
W_init_args=None,
Expand Down Expand Up @@ -1472,7 +1473,7 @@ def __init__(
strides=strides,
padding=padding,
data_format='channels_last',
dilation_rate=(1, 1),
dilation_rate=dilation_rate,
activation=act,
use_bias=(False if b_init is None else True),
kernel_initializer=W_init, #None,
Expand Down Expand Up @@ -1520,20 +1521,7 @@ def __init__(
self.all_params.append(W)


@deprecated_alias(layer='prev_layer', n_out_channel='n_filter', end_support_version=1.9) # TODO remove this line for the 1.9 release
def deconv2d(prev_layer,
n_filter,
filter_size=(3, 3),
out_size=(30, 30),
strides=(2, 2),
padding='SAME',
batch_size=None,
act=tf.identity,
W_init=tf.truncated_normal_initializer(stddev=0.02),
b_init=tf.constant_initializer(value=0.0),
W_init_args=None,
b_init_args=None,
name='decnn2d'):
class DeConv2d(Layer):
"""Simplified version of :class:`DeConv2dLayer`.

Parameters
Expand All @@ -1550,8 +1538,8 @@ def deconv2d(prev_layer,
The stride step (height, width).
padding : str
The padding algorithm type: "SAME" or "VALID".
batch_size : int
Require if TF version < 1.3, int or None.
batch_size : int or None
Required if TF < 1.3; int or None.
If None, try to find the `batch_size` from the first dim of net.outputs (you should define the `batch_size` in the input placeholder).
act : activation function
The activation function of this layer.
Expand All @@ -1560,79 +1548,81 @@ def deconv2d(prev_layer,
b_init : initializer or None
The initializer for the bias vector. If None, skip biases.
W_init_args : dictionary
The arguments for the weight matrix initializer.
The arguments for the weight matrix initializer (For TF < 1.3).
b_init_args : dictionary
The arguments for the bias vector initializer.
The arguments for the bias vector initializer (For TF < 1.3).
name : str
A unique layer name.

Returns
-------
:class:`Layer`
A :class:`DeConv2dLayer` object.

"""

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please re-add the deprecation header, just above the init function

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

done

logging.info("DeConv2d %s: n_filters:%s strides:%s pad:%s act:%s" % (name, str(n_filter), str(strides), padding, act.__name__))
@deprecated_alias(layer='prev_layer', n_out_channel='n_filter', end_support_version=1.9) # TODO remove this line for the 1.9 release
def __init__(
self,
prev_layer,
n_filter=32,
filter_size=(3, 3),
out_size=(30, 30), # remove
strides=(2, 2),
padding='SAME',
batch_size=None, # remove
act=tf.identity,
W_init=tf.truncated_normal_initializer(stddev=0.02),
b_init=tf.constant_initializer(value=0.0),
W_init_args=None, # remove
b_init_args=None, # remove
name='decnn2d'):
super(DeConv2d, self).__init__(prev_layer=prev_layer, name=name)
logging.info("DeConv2d %s: n_filters:%s strides:%s pad:%s act:%s" % (name, str(n_filter), str(strides), padding, act.__name__))

if W_init_args is None:
W_init_args = {}
if b_init_args is None:
b_init_args = {}
if act is None:
act = tf.identity

if len(strides) != 2:
raise ValueError("len(strides) should be 2, DeConv2d and DeConv2dLayer are different.")

if tf.__version__ > '1.3':
inputs = prev_layer.outputs
scope_name = tf.get_variable_scope().name
# if scope_name:
# whole_name = scope_name + '/' + name
# else:
# whole_name = name
net_new = Layer(prev_layer=None, name=name)
# with tf.name_scope(name):
with tf.variable_scope(name) as vs:
net_new.outputs = tf.contrib.layers.conv2d_transpose(
inputs=inputs,
num_outputs=n_filter,
if W_init_args is None:
W_init_args = {}
if b_init_args is None:
b_init_args = {}
if act is None:
act = tf.identity

if len(strides) != 2:
raise ValueError("len(strides) should be 2, DeConv2d and DeConv2dLayer are different.")

if tf.__version__ > '1.3':
self.inputs = prev_layer.outputs
# scope_name = tf.get_variable_scope().name
conv2d_transpose = tf.layers.Conv2DTranspose(
filters=n_filter,
kernel_size=filter_size,
stride=strides,
strides=strides,
padding=padding,
activation_fn=act,
weights_initializer=W_init,
biases_initializer=b_init,
scope=name)
new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)
net_new.all_layers = list(prev_layer.all_layers)
net_new.all_params = list(prev_layer.all_params)
net_new.all_drop = dict(prev_layer.all_drop)
net_new.all_layers.extend([net_new.outputs])
net_new.all_params.extend(new_variables)
return net_new
else:
if batch_size is None:
# batch_size = tf.shape(net.outputs)[0]
fixed_batch_size = prev_layer.outputs.get_shape().with_rank_at_least(1)[0]
if fixed_batch_size.value:
batch_size = fixed_batch_size.value
else:
from tensorflow.python.ops import array_ops
batch_size = array_ops.shape(prev_layer.outputs)[0]
return DeConv2dLayer(
prev_layer=prev_layer,
act=act,
shape=(filter_size[0], filter_size[1], n_filter, int(prev_layer.outputs.get_shape()[-1])),
output_shape=(batch_size, int(out_size[0]), int(out_size[1]), n_filter),
strides=(1, strides[0], strides[1], 1),
padding=padding,
W_init=W_init,
b_init=b_init,
W_init_args=W_init_args,
b_init_args=b_init_args,
name=name)
activation=act,
kernel_initializer=W_init,
bias_initializer=b_init,
name=name)
self.outputs = conv2d_transpose(self.inputs)
new_variables = conv2d_transpose.weights # new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)
self.all_layers.append(self.outputs)
self.all_params.extend(new_variables)
else:
raise RuntimeError("please update TF > 1.3 or downgrade TL < 1.8.4")
# if batch_size is None:
# # batch_size = tf.shape(net.outputs)[0]
# fixed_batch_size = prev_layer.outputs.get_shape().with_rank_at_least(1)[0]
# if fixed_batch_size.value:
# batch_size = fixed_batch_size.value
# else:
# from tensorflow.python.ops import array_ops
# batch_size = array_ops.shape(prev_layer.outputs)[0]
# return DeConv2dLayer(
# prev_layer=prev_layer,
# act=act,
# shape=(filter_size[0], filter_size[1], n_filter, int(prev_layer.outputs.get_shape()[-1])),
# output_shape=(batch_size, int(out_size[0]), int(out_size[1]), n_filter),
# strides=(1, strides[0], strides[1], 1),
# padding=padding,
# W_init=W_init,
# b_init=b_init,
# W_init_args=W_init_args,
# b_init_args=b_init_args,
# name=name)


class DeConv3d(Layer):
Expand Down Expand Up @@ -2022,4 +2012,4 @@ def __init__(
AtrousConv1dLayer = atrous_conv1d
Conv1d = conv1d
# Conv2d = conv2d
DeConv2d = deconv2d
# DeConv2d = deconv2d