Merged
Commits (31)
864d7cf  Activation Cleaning Docstring Test (Apr 19, 2018)
04023ba  Requirements pinned with range to ensure tested versions are used. Ra… (Apr 19, 2018)
9167833  setup.cfg file added with PEP8 configuration (Apr 19, 2018)
3a4cda7  activation.py refactored (Apr 19, 2018)
1af0863  docstring fixed - ready for documentation unittest (Apr 19, 2018)
ea37ed2  Merge branch 'master' into requirements_pinning (DEKHTIARJonathan, Apr 19, 2018)
0174a27  Merge branch 'requirements_pinning' into documentation_cleaning (Apr 19, 2018)
7ef9e53  Yapf correction for max_line_length: 120 (Apr 19, 2018)
dc46dfc  test yapf refactored (Apr 19, 2018)
25ecace  Merge branch 'master' into documentation_cleaning (DEKHTIARJonathan, Apr 19, 2018)
40b196e  Requirements conflict solved (Apr 19, 2018)
8a52165  Yapf Style modified and merged in file "setup.cfg" (Apr 20, 2018)
5b9a699  Yapf Configuration Updated (Apr 20, 2018)
c7365f5  Code Refactored with new YAPF formatting style (Apr 20, 2018)
835092a  Code Refactored with new YAPF formatting style (Apr 20, 2018)
078d804  Code Refactored with new YAPF formatting style (Apr 20, 2018)
e797889  Merge branch 'master' into documentation_cleaning (DEKHTIARJonathan, Apr 20, 2018)
f726da2  tl.layers.pooling YAPF reformat (Apr 20, 2018)
38f61f2  Merge branch 'master' into documentation_cleaning (Apr 20, 2018)
7a6e383  Merge branch 'master' of https://github.com/tensorlayer/tensorlayer i… (Apr 20, 2018)
034b3df  yapf updated (Apr 20, 2018)
45e4772  gitignore updated (Apr 20, 2018)
e2021f5  YAPF Style Fixing Attempt (Apr 20, 2018)
4d75516  Space Error Fix (Apr 20, 2018)
6092b6c  Style Correction (Apr 20, 2018)
240f0df  Assertion Codacy Errors Corrected (Apr 20, 2018)
2c97f69  Error Fix (Apr 20, 2018)
9966722  Assertion Refactored (Apr 20, 2018)
a704cb7  Merge branch 'master' into documentation_cleaning (zsdonghao, Apr 20, 2018)
fd26da0  Merge branch 'master' into documentation_cleaning (wagamamaz, Apr 20, 2018)
01311e2  YAPF Style Applied to Master (Apr 20, 2018)
2 changes: 2 additions & 0 deletions .gitignore
@@ -12,3 +12,5 @@ tensorlayer.egg-info
tensorlayer/__pacache__
venv/
.pytest_cache/
update_tl.bat
update_tl.py
60 changes: 58 additions & 2 deletions .style.yapf
@@ -1,4 +1,60 @@
[style]
based_on_style=google  # previously: based_on_style = pep8

# The number of columns to use for indentation.
indent_width = 4

# The column limit.
column_limit=120  # previously: column_limit = 160

# Place each dictionary entry onto its own line.
each_dict_entry_on_separate_line = True

# Put closing brackets on a separate line, dedented, if the bracketed
# expression can't fit in a single line. Applies to all kinds of brackets,
# including function definitions and calls. For example:
#
# config = {
# 'key1': 'value1',
# 'key2': 'value2',
# } # <--- this bracket is dedented and on a separate line
#
# time_series = self.remote_client.query_entity_counters(
# entity='dev3246.region1',
# key='dns.query_latency_tcp',
# transform=Transformation.AVERAGE(window=timedelta(seconds=60)),
# start_ts=now()-timedelta(days=3),
# end_ts=now(),
# ) # <--- this bracket is dedented and on a separate line
dedent_closing_brackets=True

# Do not split consecutive brackets. Only relevant when DEDENT_CLOSING_BRACKETS is set
coalesce_brackets = False

# Align closing bracket with visual indentation.
align_closing_bracket_with_visual_indent = False

# Split named assignments onto individual lines.
split_before_named_assigns = False

# If an argument / parameter list is going to be split, then split before the first argument.
split_before_first_argument = True

# Split before arguments if the argument list is terminated by a comma.
split_arguments_when_comma_terminated = False

# Insert a space between the ending comma and closing bracket of a list, etc.
space_between_ending_comma_and_closing_bracket = True

# Join short lines into one line. E.g., single line if statements.
join_multiple_lines = True

# Do not include spaces around selected binary operators.
# Example: 1 + 2 * 3 - 4 / 5 => 1 + 2*3 - 4/5
no_spaces_around_selected_binary_operators = True

# Allow lambdas to be formatted on more than one line.
allow_multiline_lambdas = True

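# Penalties tuning how strongly yapf resists adding extra line splits
# (higher values make the corresponding split less likely).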
SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT = 10
SPLIT_PENALTY_AFTER_OPENING_BRACKET = 500
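To see what this configuration does in practice, the sketch below runs yapf programmatically on one of the long lines reformatted later in this PR. It is illustrative only: it assumes yapf is installed and that the .style.yapf above is saved in the working directory, and the exact output may vary slightly between yapf releases.

from yapf.yapflib.yapf_api import FormatCode  # yapf's programmatic entry point

# One of the 160-column-era lines from this PR, exceeding the new 120 limit.
unformatted = (
    "net = tl.layers.BinaryDenseLayer(net, n_units=384, act=tf.nn.relu, "
    "W_init=W_init2, b_init=b_init2, name='d1relu')  # output: (batch_size, 384)\n"
)

# style_config may point at the .style.yapf file shown above.
formatted, changed = FormatCode(unformatted, style_config='.style.yapf')
print(formatted)
# With dedent_closing_brackets=True the call is split and the closing bracket
# lands on its own, dedented line, matching the d1relu hunk later in this diff:
# net = tl.layers.BinaryDenseLayer(
#     net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu'
# )  # output: (batch_size, 384)

The same result comes from the command line with yapf --style=.style.yapf --in-place <file>, which is presumably how the bulk of this PR was generated.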
1 change: 1 addition & 0 deletions docs/index.rst
@@ -54,6 +54,7 @@ method, this part of the documentation is for you.
modules/activation
modules/models
modules/distributed
modules/db


Command-line Reference
8 changes: 6 additions & 2 deletions example/tutorial_atari_pong.py
@@ -133,7 +133,11 @@ def prepro(I):
prev_x = None

if reward != 0:
print(('episode %d: game %d took %.5fs, reward: %f' % (episode_number, game_number, time.time() - start_time, reward)),
('' if reward == -1 else ' !!!!!!!!'))
print(
(
'episode %d: game %d took %.5fs, reward: %f' %
(episode_number, game_number, time.time() - start_time, reward)
), ('' if reward == -1 else ' !!!!!!!!')
)
start_time = time.time()
game_number += 1
57 changes: 42 additions & 15 deletions example/tutorial_binarynet_cifar10_tfrecord.py
@@ -83,7 +83,9 @@ def data_to_tfrecord(images, labels, filename):
feature={
"label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
}))
}
)
)
writer.write(example.SerializeToString()) # Serialize To String
writer.close()

@@ -97,12 +99,13 @@ def read_and_decode(filename, is_train=None):
serialized_example, features={
'label': tf.FixedLenFeature([], tf.int64),
'img_raw': tf.FixedLenFeature([], tf.string),
})
}
)
# You can do more image distortion here for training data
img = tf.decode_raw(features['img_raw'], tf.float32)
img = tf.reshape(img, [32, 32, 3])
# img = tf.cast(img, tf.float32) #* (1. / 255) - 0.5
if is_train == True:
if is_train ==True:
# 1. Randomly crop a [height, width] section of the image.
img = tf.random_crop(img, [24, 24, 3])
# 2. Randomly flip the image horizontally.
@@ -147,9 +150,12 @@ def read_and_decode(filename, is_train=None):
x_test_, y_test_ = read_and_decode("test.cifar10", False)

x_train_batch, y_train_batch = tf.train.shuffle_batch(
[x_train_, y_train_], batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32) # set the number of threads here
[x_train_, y_train_], batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32
) # set the number of threads here
# for testing, uses batch instead of shuffle_batch
x_test_batch, y_test_batch = tf.train.batch([x_test_, y_test_], batch_size=batch_size, capacity=50000, num_threads=32)
x_test_batch, y_test_batch = tf.train.batch(
[x_test_, y_test_], batch_size=batch_size, capacity=50000, num_threads=32
)

def model(x_crop, y_, reuse):
""" For more simplified CNN APIs, check tensorlayer.org """
@@ -161,16 +167,28 @@ def model(x_crop, y_, reuse):
net = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn1')
net = tl.layers.SignLayer(net)
net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')
net = tl.layers.LocalResponseNormLayer(net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
net = tl.layers.BinaryConv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn2')
net = tl.layers.LocalResponseNormLayer(net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
net = tl.layers.LocalResponseNormLayer(
net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1'
)
net = tl.layers.BinaryConv2d(
net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn2'
)
net = tl.layers.LocalResponseNormLayer(
net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2'
)
net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')
net = tl.layers.FlattenLayer(net, name='flatten') # output: (batch_size, 2304)
net = tl.layers.SignLayer(net)
net = tl.layers.BinaryDenseLayer(net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu') # output: (batch_size, 384)
net = tl.layers.BinaryDenseLayer(
net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu'
) # output: (batch_size, 384)
net = tl.layers.SignLayer(net)
net = tl.layers.BinaryDenseLayer(net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu') # output: (batch_size, 192)
net = tl.layers.DenseLayer(net, n_units=10, act=tf.identity, W_init=W_init2, name='output') # output: (batch_size, 10)
net = tl.layers.BinaryDenseLayer(
net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu'
) # output: (batch_size, 192)
net = tl.layers.DenseLayer(
net, n_units=10, act=tf.identity, W_init=W_init2, name='output'
) # output: (batch_size, 10)
y = net.outputs

ce = tl.cost.cross_entropy(y, y_, name='cost')
@@ -201,9 +219,15 @@ def model_batch_norm(x_crop, y_, reuse, is_train):
net = tl.layers.BatchNormLayer(net, is_train, act=tf.nn.relu, name='batch2')
net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')
net = tl.layers.FlattenLayer(net, name='flatten') # output: (batch_size, 2304)
net = tl.layers.DenseLayer(net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu') # output: (batch_size, 384)
net = tl.layers.DenseLayer(net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu') # output: (batch_size, 192)
net = tl.layers.DenseLayer(net, n_units=10, act=tf.identity, W_init=W_init2, name='output') # output: (batch_size, 10)
net = tl.layers.DenseLayer(
net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu'
) # output: (batch_size, 384)
net = tl.layers.DenseLayer(
net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu'
) # output: (batch_size, 192)
net = tl.layers.DenseLayer(
net, n_units=10, act=tf.identity, W_init=W_init2, name='output'
) # output: (batch_size, 10)
y = net.outputs

ce = tl.cost.cross_entropy(y, y_, name='cost')
@@ -273,7 +297,10 @@ def model_batch_norm(x_crop, y_, reuse, is_train):
n_batch += 1

if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
print("Epoch %d : Step %d-%d of %d took %fs" % (epoch, step, step + n_step_epoch, n_step, time.time() - start_time))
print(
"Epoch %d : Step %d-%d of %d took %fs" %
(epoch, step, step + n_step_epoch, n_step, time.time() - start_time)
)
print(" train loss: %f" % (train_loss / n_batch))
print(" train acc: %f" % (train_acc / n_batch))

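Since data_to_tfrecord and read_and_decode are only partially visible in the hunks above, here is a self-contained sketch of the TFRecord round trip they implement, written against the TensorFlow 1.x queue-based API this example uses. Names and shapes follow the tutorial; treat it as an outline of the pattern, not the file's exact contents.

import numpy as np
import tensorflow as tf  # TensorFlow 1.x

def write_tfrecord(images, labels, filename):
    """Serialize (image, label) pairs into tf.train.Example records."""
    writer = tf.python_io.TFRecordWriter(filename)
    for img, label in zip(images, labels):
        img_raw = img.astype(np.float32).tobytes()  # float32 to match decode_raw below
        example = tf.train.Example(
            features=tf.train.Features(
                feature={
                    'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(label)])),
                    'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
                }
            )
        )
        writer.write(example.SerializeToString())
    writer.close()

def read_tfrecord(filename):
    """Queue-based reader: returns one parsed (img, label) tensor pair."""
    filename_queue = tf.train.string_input_producer([filename])
    _, serialized_example = tf.TFRecordReader().read(filename_queue)
    features = tf.parse_single_example(
        serialized_example, features={
            'label': tf.FixedLenFeature([], tf.int64),
            'img_raw': tf.FixedLenFeature([], tf.string),
        }
    )
    img = tf.reshape(tf.decode_raw(features['img_raw'], tf.float32), [32, 32, 3])
    label = tf.cast(features['label'], tf.int32)
    return img, label

tf.train.shuffle_batch then draws from a buffer of at least min_after_dequeue parsed examples, which is what randomizes the training batches; tf.train.batch, used for the test set, preserves order instead.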
15 changes: 12 additions & 3 deletions example/tutorial_bipedalwalker_a3c_continuous_action.py
@@ -67,6 +67,7 @@


class ACNet(object):

def __init__(self, scope, globalAC=None):
self.scope = scope
if scope == GLOBAL_NET_SCOPE:
@@ -144,7 +145,8 @@ def _build_net(self):
self.v = v.outputs

def update_global(self, feed_dict): # run by a local
_, _, t = sess.run([self.update_a_op, self.update_c_op, self.test], feed_dict) # local grads applies to global net
_, _, t = sess.run([self.update_a_op, self.update_c_op, self.test],
feed_dict) # local grads applies to global net
return t

def pull_global(self): # run by a local
@@ -156,14 +158,18 @@ def choose_action(self, s): # run by a local

def save_ckpt(self):
tl.files.exists_or_mkdir(self.scope)
tl.files.save_ckpt(sess=sess, mode_name='model.ckpt', var_list=self.a_params + self.c_params, save_dir=self.scope, printable=True)
tl.files.save_ckpt(
sess=sess, mode_name='model.ckpt', var_list=self.a_params + self.c_params, save_dir=self.scope,
printable=True
)

def load_ckpt(self):
tl.files.load_ckpt(sess=sess, var_list=self.a_params + self.c_params, save_dir=self.scope, printable=True)
# tl.files.load_ckpt(sess=sess, mode_name='model.ckpt', var_list=self.a_params+self.c_params, save_dir=self.scope, is_latest=False, printable=True)


class Worker(object):

def __init__(self, name, globalAC):
self.env = gym.make(GAME)
self.name = name
@@ -202,7 +208,10 @@ def work(self):
buffer_v_target.append(v_s_)
buffer_v_target.reverse()

buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
buffer_s, buffer_a, buffer_v_target = (
np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
)

feed_dict = {
self.AC.s: buffer_s,
self.AC.a_his: buffer_a,
15 changes: 12 additions & 3 deletions example/tutorial_cartpole_ac.py
@@ -69,6 +69,7 @@


class Actor(object):

def __init__(self, sess, n_features, n_actions, lr=0.001):
self.sess = sess
self.s = tf.placeholder(tf.float32, [1, n_features], "state")
@@ -85,7 +86,9 @@ def __init__(self, sess, n_features, n_actions, lr=0.001):

# Hao Dong
with tf.variable_scope('loss'):
self.exp_v = tl.rein.cross_entropy_reward_loss(logits=self.acts_logits, actions=self.a, rewards=self.td_error, name='actor_weighted_loss')
self.exp_v = tl.rein.cross_entropy_reward_loss(
logits=self.acts_logits, actions=self.a, rewards=self.td_error, name='actor_weighted_loss'
)

with tf.variable_scope('train'):
self.train_op = tf.train.AdamOptimizer(lr).minimize(self.exp_v)
@@ -113,6 +116,7 @@ def choose_action_greedy(self, s):


class Critic(object):

def __init__(self, sess, n_features, lr=0.01):
self.sess = sess
self.s = tf.placeholder(tf.float32, [1, n_features], "state")
@@ -143,7 +147,9 @@ def learn(self, s, r, s_):
sess = tf.Session()

actor = Actor(sess, n_features=N_F, n_actions=N_A, lr=LR_A)
critic = Critic(sess, n_features=N_F, lr=LR_C) # we need a good teacher, so the teacher should learn faster than the actor
critic = Critic(
sess, n_features=N_F, lr=LR_C
) # we need a good teacher, so the teacher should learn faster than the actor

tl.layers.initialize_global_variables(sess)

Expand Down Expand Up @@ -187,7 +193,10 @@ def learn(self, s, r, s_):
running_reward = running_reward * 0.95 + ep_rs_sum * 0.05
# start rendering if running_reward is greater than a threshold
# if running_reward > DISPLAY_REWARD_THRESHOLD: RENDER = True
print("Episode: %d reward: %f running_reward %f took: %.5f" % (i_episode, ep_rs_sum, running_reward, time.time() - episode_time))
print(
"Episode: %d reward: %f running_reward %f took: %.5f" %
(i_episode, ep_rs_sum, running_reward, time.time() - episode_time)
)

# Early Stopping for quick check
if t >= MAX_EP_STEPS:
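For readers tracing the actor update in the hunks above: tl.rein.cross_entropy_reward_loss computes a reward-weighted cross-entropy, the classic policy-gradient surrogate. Conceptually it behaves like the sketch below (an approximation of the idea, not TensorLayer's verbatim implementation; the library's exact reduction may differ).

import tensorflow as tf  # TensorFlow 1.x

def cross_entropy_reward_loss_sketch(logits, actions, rewards):
    # Per-step cross-entropy between the policy logits and the actions taken.
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=actions, logits=logits)
    # Weighting by reward (here the TD error) turns this into a policy-gradient
    # surrogate: minimizing it raises the log-probability of actions that led
    # to a positive advantage and lowers it for the rest.
    return tf.reduce_sum(ce * rewards)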
34 changes: 22 additions & 12 deletions example/tutorial_cifar10.py
@@ -44,9 +44,15 @@ def model(x, y_, reuse):
# net = PoolLayer(net, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
# padding='SAME', pool = tf.nn.max_pool, name ='pool2') # output: (batch_size, 6, 6, 64)
net = FlattenLayer(net, name='flatten') # output: (batch_size, 2304)
net = DenseLayer(net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu') # output: (batch_size, 384)
net = DenseLayer(net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu') # output: (batch_size, 192)
net = DenseLayer(net, n_units=10, act=tf.identity, W_init=tf.truncated_normal_initializer(stddev=1 / 192.0), name='output') # output: (batch_size, 10)
net = DenseLayer(
net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu'
) # output: (batch_size, 384)
net = DenseLayer(
net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu'
) # output: (batch_size, 192)
net = DenseLayer(
net, n_units=10, act=tf.identity, W_init=tf.truncated_normal_initializer(stddev=1 / 192.0), name='output'
) # output: (batch_size, 10)
y = net.outputs

ce = tl.cost.cross_entropy(y, y_, name='cost')
@@ -89,9 +95,15 @@ def model_batch_norm(x, y_, reuse, is_train):
# padding='SAME', pool = tf.nn.max_pool, name ='pool2') # output: (batch_size, 6, 6, 64)

net = FlattenLayer(net, name='flatten') # output: (batch_size, 2304)
net = DenseLayer(net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu') # output: (batch_size, 384)
net = DenseLayer(net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu') # output: (batch_size, 192)
net = DenseLayer(net, n_units=10, act=tf.identity, W_init=tf.truncated_normal_initializer(stddev=1 / 192.0), name='output') # output: (batch_size, 10)
net = DenseLayer(
net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu'
) # output: (batch_size, 384)
net = DenseLayer(
net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu'
) # output: (batch_size, 192)
net = DenseLayer(
net, n_units=10, act=tf.identity, W_init=tf.truncated_normal_initializer(stddev=1 / 192.0), name='output'
) # output: (batch_size, 10)
y = net.outputs

ce = tl.cost.cross_entropy(y, y_, name='cost')
@@ -136,11 +148,8 @@ def distort_fn(x, is_train=False):
return x


x = tf.placeholder(tf.float32, shape=[None, 24, 24, 3], name='x')
y_ = tf.placeholder(
tf.int64, shape=[
None,
], name='y_')
x = tf.placeholder(dtype=tf.float32, shape=[None, 24, 24, 3], name='x')
y_ = tf.placeholder(dtype=tf.int64, shape=[None], name='y_')

## using local response normalization
# network, cost, _ = model(x, y_, False)
Expand All @@ -156,7 +165,8 @@ def distort_fn(x, is_train=False):
batch_size = 128

train_params = network.all_params
train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False).minimize(cost, var_list=train_params)
train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08,
use_locking=False).minimize(cost, var_list=train_params)

tl.layers.initialize_global_variables(sess)

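Two of the cifar10 changes above are worth a note: shape=[None] is identical to the old shape=[None,] (both declare a rank-1 tensor of unknown length), and minimize(cost, var_list=train_params) updates only the listed variables. A minimal TF 1.x sketch of the var_list pattern, with hypothetical names:

import tensorflow as tf  # TensorFlow 1.x

x = tf.placeholder(dtype=tf.float32, shape=[None, 3], name='x')
y_ = tf.placeholder(dtype=tf.int64, shape=[None], name='y_')  # [None] == [None,]

logits = tf.layers.dense(x, 10, name='head')
cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_, logits=logits))

# Only variables in var_list receive Adam updates; anything else stays frozen.
# The tutorial passes network.all_params, i.e. every TensorLayer parameter.
head_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='head')
train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.9, beta2=0.999,
                                  epsilon=1e-08).minimize(cost, var_list=head_vars)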