diff --git a/example/tutorial_bipedalwalker_a3c_continuous_action.py b/example/tutorial_bipedalwalker_a3c_continuous_action.py
index ae3f66736..e861f5124 100644
--- a/example/tutorial_bipedalwalker_a3c_continuous_action.py
+++ b/example/tutorial_bipedalwalker_a3c_continuous_action.py
@@ -32,8 +32,6 @@
"""
import multiprocessing
-import os
-import shutil
import threading
import gym
@@ -257,8 +255,7 @@ def work(self):
# start TF threading
worker_threads = []
for worker in workers:
- job = lambda: worker.work()
- t = threading.Thread(target=job)
+ t = threading.Thread(target=worker.work)
t.start()
worker_threads.append(t)
COORD.join(worker_threads)
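
Note: passing the bound method straight to threading.Thread is equivalent to the
old lambda and avoids the late-binding pitfall of creating lambdas in a loop. A
minimal sketch of the pattern (the Worker class here is illustrative, not the
one from the tutorial):

    import threading

    class Worker(object):
        def __init__(self, wid):
            self.wid = wid
        def work(self):
            print("worker %d running" % self.wid)

    workers = [Worker(i) for i in range(2)]
    threads = []
    for worker in workers:
        t = threading.Thread(target=worker.work)  # bound method captures its own worker
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
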
diff --git a/example/tutorial_cifar10_tfrecord.py b/example/tutorial_cifar10_tfrecord.py
index 6df316217..65d32c7ef 100644
--- a/example/tutorial_cifar10_tfrecord.py
+++ b/example/tutorial_cifar10_tfrecord.py
@@ -42,7 +42,7 @@
import io
import os
import time
-import numpy as np
+# import numpy as np
import tensorflow as tf
import tensorlayer as tl
from PIL import Image
diff --git a/example/tutorial_imdb_fasttext.py b/example/tutorial_imdb_fasttext.py
index a5c542dc0..437e62b76 100644
--- a/example/tutorial_imdb_fasttext.py
+++ b/example/tutorial_imdb_fasttext.py
@@ -1,10 +1,9 @@
#!/usr/bin/env python
+"""
+This demo implements FastText[1] for sentence classification.
-__doc__ = """
-
-This demo implements FastText[1] for sentence classification. FastText is a
-simple model for text classification with performance often close to
-state-of-the-art, and is useful as a solid baseline.
+FastText is a simple model for text classification with performance often close
+to state-of-the-art, and is useful as a solid baseline.
There are some important differences between this implementation and what
is described in the paper. Instead of Hogwild! SGD[2], we use Adam optimizer
diff --git a/example/tutorial_inceptionV3_tfslim.py b/example/tutorial_inceptionV3_tfslim.py
index dd5e58901..08b98dc85 100644
--- a/example/tutorial_inceptionV3_tfslim.py
+++ b/example/tutorial_inceptionV3_tfslim.py
@@ -46,7 +46,8 @@ def load_image(path):
# load image
img = skimage.io.imread(path)
img = img / 255.0
- assert (0 <= img).all() and (img <= 1.0).all()
+ if ((0 <= img).all() and (img <= 1.0).all()) is False:
+ raise Exception("image value should be [0, 1]")
# print "Original Image Shape: ", img.shape
# we crop image from center
short_edge = min(img.shape[:2])
diff --git a/example/tutorial_matrix.py b/example/tutorial_matrix.py
index 50e329140..9c1478bd2 100644
--- a/example/tutorial_matrix.py
+++ b/example/tutorial_matrix.py
@@ -1,5 +1,4 @@
import tensorflow as tf
-import tensorlayer as tl
sess = tf.InteractiveSession()
diff --git a/example/tutorial_ptb_lstm.py b/example/tutorial_ptb_lstm.py
index d75f97940..ea1a18fff 100644
--- a/example/tutorial_ptb_lstm.py
+++ b/example/tutorial_ptb_lstm.py
@@ -104,7 +104,6 @@
import numpy as np
import tensorflow as tf
import tensorlayer as tl
-from tensorlayer.layers import set_keep
flags = tf.flags
flags.DEFINE_string("model", "small", "A type of model. Possible options are: small, medium, large.")
diff --git a/example/tutorial_vgg16.py b/example/tutorial_vgg16.py
index 9a33aa693..1f376fd30 100644
--- a/example/tutorial_vgg16.py
+++ b/example/tutorial_vgg16.py
@@ -166,7 +166,7 @@ def conv_layers(net_in):
def conv_layers_simple_api(net_in):
- with tf.name_scope('preprocess') as scope:
+ with tf.name_scope('preprocess'):
"""
Notice that we include a preprocessing layer that takes the RGB image
with pixels values in the range of 0-255 and subtracts the mean image
diff --git a/tensorlayer/db.py b/tensorlayer/db.py
index 6b0b6c3fe..d4782c7b0 100644
--- a/tensorlayer/db.py
+++ b/tensorlayer/db.py
@@ -85,13 +85,15 @@ def __init__(self, ip='localhost', port=27017, db_name='db_name', user_name=None
self.db_name = db_name
self.user_name = user_name
def __autofill(self, args):
return args.update({'studyID': self.studyID})
- def __serialization(self, ps):
+ @staticmethod
+ def __serialization(ps):
return pickle.dumps(ps, protocol=2)
- def __deserialization(self, ps):
+ @staticmethod
+ def __deserialization(ps):
return pickle.loads(ps)
def save_params(self, params=None, args=None): #, file_name='parameters'):
@@ -298,20 +301,25 @@ def test_log(self, args=None):
return _result
@AutoFill
- def del_test_log(self, args={}):
+ def del_test_log(self, args=None):
""" Delete test log.
Parameters
-----------
args : dictionary, find items to delete, leave it empty to delete all log.
"""
+ if args is None:
+ args = {}
self.db.TestLog.delete_many(args)
print("[TensorDB] Delete TestLog SUCCESS")
- ## =========================== Network Architecture ================== ##
+ # =========================== Network Architecture ================== #
@AutoFill
- def save_model_architecture(self, s, args={}):
+ def save_model_architecture(self, s, args=None):
+ if args is None:
+ args = {}
+
self.__autofill(args)
fid = self.archfs.put(s, filename="modelarchitecture")
args.update({"fid": fid})
diff --git a/tensorlayer/files.py b/tensorlayer/files.py
index b866f0dbc..e8fa7ff2b 100644
--- a/tensorlayer/files.py
+++ b/tensorlayer/files.py
@@ -594,7 +594,7 @@ def load_flickr25k_dataset(tag='sky', path="data", n_threads=50, printable=False
else:
logging.info("[Flickr25k] reading images with tag: {}".format(tag))
images_list = []
- for idx in range(0, len(path_tags)):
- tags = read_file(folder_tags + '/' + path_tags[idx]).split('\n')
+ for tag_file in path_tags:
+ tags = read_file(folder_tags + '/' + tag_file).split('\n')
# logging.info(idx+1, tags)
if tag is None or tag in tags:
@@ -1459,6 +1459,13 @@ def load_ckpt(sess=None, mode_name='model.ckpt', save_dir='checkpoint', var_list
-def save_any_to_npy(save_dict={}, name='file.npy'):
+def save_any_to_npy(save_dict=None, name='file.npy'):
"""Save variables to `.npy` file.
+ Parameters
+ ------------
save_dict : dictionary
+ The variables to be saved.
+ name : str
+ File name.
+
Examples
---------
>>> tl.files.save_any_to_npy(save_dict={'data': ['a','b']}, name='test.npy')
@@ -1467,12 +1474,21 @@ def save_any_to_npy(save_dict={}, name='file.npy'):
... {'data': ['a','b']}
"""
+ if save_dict is None:
+ save_dict = {}
np.save(name, save_dict)
def load_npy_to_any(path='', name='file.npy'):
"""Load `.npy` file.
+ Parameters
+ ------------
+ path : str
+ Path to the file (optional).
+ name : str
+ File name.
+
Examples
---------
- see tl.files.save_any_to_npy()
diff --git a/tensorlayer/layers/convolution.py b/tensorlayer/layers/convolution.py
index 748939716..25dae143f 100644
--- a/tensorlayer/layers/convolution.py
+++ b/tensorlayer/layers/convolution.py
@@ -302,7 +302,7 @@ def __init__(
logging.info("DeConv2dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(output_shape), str(strides), padding,
act.__name__))
# logging.info(" DeConv2dLayer: Untested")
- with tf.variable_scope(name) as vs:
+ with tf.variable_scope(name):
W = tf.get_variable(name='W_deconv2d', shape=shape, initializer=W_init, dtype=D_TYPE, **W_init_args)
if b_init:
b = tf.get_variable(name='b_deconv2d', shape=(shape[-2]), initializer=b_init, dtype=D_TYPE, **b_init_args)
diff --git a/tensorlayer/layers/core.py b/tensorlayer/layers/core.py
index e251bf7a1..c0d153823 100644
--- a/tensorlayer/layers/core.py
+++ b/tensorlayer/layers/core.py
@@ -6,7 +6,7 @@
import tensorflow as tf
from .. import _logging as logging
-from .. import cost, files, iterate, utils, visualize
+from .. import files, iterate, utils, visualize
# __all__ = [
# "Layer",
@@ -289,11 +289,13 @@ def list_remove_repeat(x):
"""
y = []
- [y.append(i) for i in x if not i in y]
+ for i in x:
+ if i not in y:
+ y.append(i)
return y
-def merge_networks(layers=[]):
+def merge_networks(layers=None):
"""Merge all parameters, layers and dropout probabilities to a :class:`Layer`.
The output of return network is the first network in the list.
@@ -314,6 +316,8 @@ def merge_networks(layers=[]):
>>> n1 = tl.layers.merge_networks([n1, n2])
"""
+ if layers is None:
+ raise Exception("layers should be a list of TensorLayer's Layers.")
layer = layers[0]
all_params = []
@@ -702,7 +706,7 @@ def __init__(
self.inputs = inputs
logging.info("EmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size))
- with tf.variable_scope(name) as vs:
+ with tf.variable_scope(name):
embeddings = tf.get_variable(name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init, dtype=D_TYPE, **E_init_args)
embed = tf.nn.embedding_lookup(embeddings, self.inputs)
diff --git a/tensorlayer/layers/extend.py b/tensorlayer/layers/extend.py
index e9a96fee9..bcf749d83 100644
--- a/tensorlayer/layers/extend.py
+++ b/tensorlayer/layers/extend.py
@@ -68,7 +68,7 @@ def __init__(
self.inputs = layer.outputs
logging.info("TileLayer %s: multiples:%s" % (self.name, multiples))
- with tf.variable_scope(name) as vs:
+ with tf.variable_scope(name):
self.outputs = tf.tile(self.inputs, multiples=multiples)
self.all_layers = list(layer.all_layers)
self.all_params = list(layer.all_params)
diff --git a/tensorlayer/layers/importer.py b/tensorlayer/layers/importer.py
index 87711aef9..1dd02eb30 100644
--- a/tensorlayer/layers/importer.py
+++ b/tensorlayer/layers/importer.py
@@ -13,7 +13,7 @@ class LambdaLayer(Layer):
Previous layer.
fn : function
The function that applies to the outputs of previous layer.
- fn_args : dictionary
+ fn_args : dictionary or None
The arguments for the function (option).
name : str
A unique layer name.
@@ -47,9 +47,11 @@ def __init__(
self,
layer,
fn,
- fn_args={},
+ fn_args=None,
name='lambda_layer',
):
+ if fn_args is None:
+ fn_args = {}
Layer.__init__(self, name=name)
assert layer is not None
assert fn is not None
diff --git a/tensorlayer/layers/merge.py b/tensorlayer/layers/merge.py
index 40a9c113f..1f4775c83 100644
--- a/tensorlayer/layers/merge.py
+++ b/tensorlayer/layers/merge.py
@@ -68,7 +68,6 @@ def __init__(
self.all_layers = list_remove_repeat(self.all_layers)
self.all_params = list_remove_repeat(self.all_params)
- #self.all_drop = list_remove_repeat(self.all_drop) # it is a dict
class ElementwiseLayer(Layer):
@@ -112,8 +111,8 @@ def __init__(
self.outputs = layers[0].outputs
# logging.info(self.outputs._shape, type(self.outputs._shape))
for l in layers[1:]:
- assert str(self.outputs.get_shape()) == str(
- l.outputs.get_shape()), "Hint: the input shapes should be the same. %s != %s" % (self.outputs.get_shape(), str(l.outputs.get_shape()))
+ if str(self.outputs.get_shape()) != str(l.outputs.get_shape()):
+ raise Exception("Hint: the input shapes should be the same. %s != %s" % (self.outputs.get_shape(), str(l.outputs.get_shape())))
self.outputs = combine_fn(self.outputs, l.outputs, name=name)
self.all_layers = list(layers[0].all_layers)
diff --git a/tensorlayer/layers/recurrent.py b/tensorlayer/layers/recurrent.py
index e16dc0985..1bb79720c 100644
--- a/tensorlayer/layers/recurrent.py
+++ b/tensorlayer/layers/recurrent.py
@@ -256,7 +256,7 @@ class BiRNNLayer(Layer):
A TensorFlow core RNN cell.
- See `RNN Cells in TensorFlow `__.
- Note TF1.0+ and TF1.0- are different.
- cell_init_args : dictionary
+ cell_init_args : dictionary or None
The arguments for the cell function.
n_hidden : int
The number of hidden units in the layer.
@@ -316,10 +316,7 @@ def __init__(
self,
layer,
cell_fn,
- cell_init_args={
- 'use_peepholes': True,
- 'state_is_tuple': True
- },
+ cell_init_args=None,
n_hidden=100,
initializer=tf.random_uniform_initializer(-0.1, 0.1),
n_steps=5,
@@ -331,6 +328,9 @@ def __init__(
return_seq_2d=False,
name='birnn_layer',
):
+ if cell_init_args is None:
+ cell_init_args = {'use_peepholes': True, 'state_is_tuple': True}
+
Layer.__init__(self, name=name)
if cell_fn is None:
raise Exception("Please put in cell_fn")
@@ -427,7 +427,7 @@ def __init__(
if return_last:
raise Exception("Do not support return_last at the moment.")
- self.outputs = outputs[-1]
+ # self.outputs = outputs[-1]
else:
self.outputs = outputs
if return_seq_2d:
@@ -752,13 +752,13 @@ def __init__(
# Advanced Ops for Dynamic RNN
-def advanced_indexing_op(input, index):
+def advanced_indexing_op(inputs, index):
"""Advanced Indexing for Sequences, returns the outputs by given sequence lengths.
When return the last output :class:`DynamicRNNLayer` uses it to get the last outputs with the sequence lengths.
Parameters
-----------
- input : tensor for data
+ inputs : tensor for data
With shape of [batch_size, n_step(max), n_features]
index : tensor for indexing
Sequence length in Dynamic RNN. [batch_size]
@@ -790,12 +790,12 @@ def advanced_indexing_op(input, index):
- Modified from TFlearn (the original code is used for fixed length rnn), `references `__.
"""
- batch_size = tf.shape(input)[0]
- # max_length = int(input.get_shape()[1]) # for fixed length rnn, length is given
- max_length = tf.shape(input)[1] # for dynamic_rnn, length is unknown
- dim_size = int(input.get_shape()[2])
+ batch_size = tf.shape(inputs)[0]
+ # max_length = int(inputs.get_shape()[1]) # for fixed length rnn, length is given
+ max_length = tf.shape(inputs)[1] # for dynamic_rnn, length is unknown
+ dim_size = int(inputs.get_shape()[2])
index = tf.range(0, batch_size) * max_length + (index - 1)
- flat = tf.reshape(input, [-1, dim_size])
+ flat = tf.reshape(inputs, [-1, dim_size])
relevant = tf.gather(flat, index)
return relevant
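
Note: advanced_indexing_op flattens [batch_size, max_length, dim] into
[batch_size * max_length, dim] and gathers one row per sequence at position
length - 1. The same index arithmetic in plain numpy (shapes are illustrative):

    import numpy as np

    batch_size, max_length, dim = 2, 3, 4
    outputs = np.arange(batch_size * max_length * dim).reshape(batch_size, max_length, dim)
    lengths = np.array([2, 3])                       # valid steps per sequence
    index = np.arange(batch_size) * max_length + (lengths - 1)
    flat = outputs.reshape(-1, dim)
    last = flat[index]                               # last valid output per sequence
    assert (last[0] == outputs[0, 1]).all()
    assert (last[1] == outputs[1, 2]).all()
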
@@ -1076,7 +1076,7 @@ def __init__(
# cell_instance_fn1(),
# input_keep_prob=in_keep_prob,
# output_keep_prob=out_keep_prob)
- cell_creator = lambda: DropoutWrapper_fn(rnn_creator(), input_keep_prob=in_keep_prob, output_keep_prob=1.0) #out_keep_prob)
+ cell_creator = lambda: DropoutWrapper_fn(rnn_creator(), input_keep_prob=in_keep_prob, output_keep_prob=1.0)
else:
cell_creator = rnn_creator
self.cell = cell_creator()
@@ -1323,9 +1323,9 @@ def __init__(
cell_creator = lambda is_last=True: \
DropoutWrapper_fn(rnn_creator(),
input_keep_prob=in_keep_prob,
- output_keep_prob=out_keep_prob if is_last else 1.0) # out_keep_prob)
+ output_keep_prob=out_keep_prob if is_last else 1.0)
else:
- cell_creator = lambda: rnn_creator()
+ cell_creator = rnn_creator
# if dropout:
# self.fw_cell = DropoutWrapper_fn(self.fw_cell, input_keep_prob=1.0, output_keep_prob=out_keep_prob)
@@ -1374,15 +1374,17 @@ def __init__(
rnn_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)
logging.info(" n_params : %d" % (len(rnn_variables)))
+
# Manage the outputs
try: # TF1.0
outputs = tf.concat(outputs, 2)
except Exception: # TF0.12
outputs = tf.concat(2, outputs)
+
if return_last:
# [batch_size, 2 * n_hidden]
- raise Exception("Do not support return_last at the moment")
- self.outputs = advanced_indexing_op(outputs, sequence_length)
+ raise NotImplementedError("Return last is not implemented yet.")
+ # self.outputs = advanced_indexing_op(outputs, sequence_length)
else:
# [batch_size, n_step(max), 2 * n_hidden]
if return_seq_2d:
@@ -1401,7 +1403,6 @@ def __init__(
self.outputs = tf.reshape(tf.concat(outputs, 1), [batch_size, max_length, 2 * n_hidden])
except Exception: # TF0.12
self.outputs = tf.reshape(tf.concat(1, outputs), [batch_size, max_length, 2 * n_hidden])
- # self.outputs = tf.reshape(tf.concat(1, outputs), [-1, max_length, 2 * n_hidden])
# Final state
self.fw_final_states = states_fw
@@ -1417,7 +1418,6 @@ def __init__(
self.all_params.extend(rnn_variables)
-# Seq2seq
class Seq2Seq(Layer):
"""
The :class:`Seq2Seq` class is a simple :class:`DynamicRNNLayer` based Seq2seq layer without using `tl.contrib.seq2seq `__.
diff --git a/tensorlayer/layers/special_activation.py b/tensorlayer/layers/special_activation.py
index 576f353e1..1cf80acc9 100644
--- a/tensorlayer/layers/special_activation.py
+++ b/tensorlayer/layers/special_activation.py
@@ -50,7 +50,7 @@ def __init__(
alphas = tf.get_variable(name='alphas', shape=w_shape, initializer=a_init, dtype=D_TYPE, **a_init_args)
try: # TF 1.0
self.outputs = tf.nn.relu(self.inputs) + tf.multiply(alphas, (self.inputs - tf.abs(self.inputs))) * 0.5
- except: # TF 0.12
+ except Exception: # TF 0.12
self.outputs = tf.nn.relu(self.inputs) + tf.mul(alphas, (self.inputs - tf.abs(self.inputs))) * 0.5
self.all_layers = list(layer.all_layers)
diff --git a/tensorlayer/layers/stack.py b/tensorlayer/layers/stack.py
index 1ec94ddfb..d5ba3928d 100644
--- a/tensorlayer/layers/stack.py
+++ b/tensorlayer/layers/stack.py
@@ -68,7 +68,7 @@ def unstack_layer(layer, num=None, axis=0, name='unstack'):
"""
inputs = layer.outputs
- with tf.variable_scope(name) as vs:
+ with tf.variable_scope(name):
outputs = tf.unstack(inputs, num=num, axis=axis)
logging.info("UnStackLayer %s: num: %s axis: %d, n_outputs: %d" % (name, num, axis, len(outputs)))
diff --git a/tensorlayer/layers/super_resolution.py b/tensorlayer/layers/super_resolution.py
index 2ce06ef47..02e41655e 100644
--- a/tensorlayer/layers/super_resolution.py
+++ b/tensorlayer/layers/super_resolution.py
@@ -94,7 +94,7 @@ def _PS(X, r, n_out_channels):
net_new = Layer(inputs, name=whole_name)
# with tf.name_scope(name):
- with tf.variable_scope(name) as vs:
+ with tf.variable_scope(name):
net_new.outputs = act(_PS(inputs, r=scale, n_out_channels=n_out_channel))
net_new.all_layers = list(net.all_layers)
diff --git a/tensorlayer/nlp.py b/tensorlayer/nlp.py
index 6d9efe0fb..b82f604a0 100755
--- a/tensorlayer/nlp.py
+++ b/tensorlayer/nlp.py
@@ -67,8 +67,11 @@ def generate_skip_gram_batch(data, batch_size, num_skips, skip_window, data_inde
# global data_index # you can put data_index outside the function, then
# modify the global data_index in the function without return it.
# note: without using yield, this code use data_index to instead.
- assert batch_size % num_skips == 0
- assert num_skips <= 2 * skip_window
+
+ if batch_size % num_skips != 0:
+ raise Exception("batch_size should be divisible by num_skips.")
+ if num_skips > 2 * skip_window:
+ raise Exception("num_skips should be no more than 2 * skip_window.")
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
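
Note: the two checks encode how a skip-gram batch is assembled: every target word
contributes exactly num_skips (target, context) pairs, so the batch must hold a
whole number of targets, and a window of radius skip_window only offers
2 * skip_window context positions to draw from. A worked instance of the checks:

    batch_size, num_skips, skip_window = 8, 2, 1
    assert batch_size % num_skips == 0       # 8 / 2 = 4 targets fill the batch exactly
    assert num_skips <= 2 * skip_window      # at most 2 contexts exist around a target
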
@@ -90,7 +93,7 @@ def generate_skip_gram_batch(data, batch_size, num_skips, skip_window, data_inde
return batch, labels, data_index
-def sample(a=[], temperature=1.0):
+def sample(a=None, temperature=1.0):
"""Sample an index from a probability array.
Parameters
@@ -110,6 +113,8 @@ def sample(a=[], temperature=1.0):
- For large vocabulary size, choose a higher temperature or ``tl.nlp.sample_top`` to avoid error.
"""
+ if a is None:
+ raise Exception("a : list of float")
b = np.copy(a)
try:
if temperature == 1:
@@ -120,7 +125,7 @@ def sample(a=[], temperature=1.0):
a = np.log(a) / temperature
a = np.exp(a) / np.sum(np.exp(a))
return np.argmax(np.random.multinomial(1, a, 1))
- except:
+ except Exception:
# np.set_printoptions(threshold=np.nan)
# logging.info(a)
# logging.info(np.sum(a))
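
Note: sample() rescales the distribution with a temperature before drawing from a
multinomial; temperatures below 1 sharpen the distribution, above 1 flatten it.
A minimal numpy sketch of the rescaling (values are illustrative):

    import numpy as np

    a = np.array([0.1, 0.2, 0.7])
    temperature = 0.5
    b = np.log(a) / temperature
    b = np.exp(b) / np.sum(np.exp(b))            # renormalize after rescaling
    idx = np.argmax(np.random.multinomial(1, b, 1))
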
@@ -472,7 +477,7 @@ def read_words(filename="nietzsche.txt", replace=None):
with tf.gfile.GFile(filename, "r") as f:
try: # python 3.4 or older
context_list = f.read().replace(*replace).split()
- except: # python 3.5
+ except Exception: # python 3.5
f.seek(0)
replace = [x.encode('utf-8') for x in replace]
context_list = f.read().replace(*replace).split()
@@ -605,7 +610,7 @@ def build_reverse_dictionary(word_to_id):
return reverse_dictionary
-def build_words_dataset(words=[], vocabulary_size=50000, printable=True, unk_key='UNK'):
+def build_words_dataset(words=None, vocabulary_size=50000, printable=True, unk_key='UNK'):
"""Build the words dictionary and replace rare words with 'UNK' token.
The most common word has the smallest integer id.
@@ -645,6 +650,8 @@ def build_words_dataset(words=[], vocabulary_size=50000, printable=True, unk_key
- `tensorflow/examples/tutorials/word2vec/word2vec_basic.py `__
"""
+ if words is None:
+ raise Exception("words : list of str or byte")
import collections
count = [[unk_key, -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
@@ -665,12 +672,13 @@ def build_words_dataset(words=[], vocabulary_size=50000, printable=True, unk_key
if printable:
logging.info('Real vocabulary size %d' % len(collections.Counter(words).keys()))
logging.info('Limited vocabulary size {}'.format(vocabulary_size))
- assert len(collections.Counter(words).keys()) >= vocabulary_size, \
- "the limited vocabulary_size must be less than or equal to the read vocabulary_size"
+ if len(collections.Counter(words).keys()) < vocabulary_size:
+ raise Exception(
+ "the limited vocabulary_size must be less than or equal to the real vocabulary size.")
return data, count, dictionary, reverse_dictionary
-def words_to_word_ids(data=[], word_to_id={}, unk_key='UNK'):
+def words_to_word_ids(data=None, word_to_id=None, unk_key='UNK'):
"""Convert a list of string (words) to IDs.
Parameters
@@ -705,6 +713,10 @@ def words_to_word_ids(data=[], word_to_id={}, unk_key='UNK'):
- `tensorflow.models.rnn.ptb.reader `__
"""
+ if data is None:
+ raise Exception("data : list of string or byte")
+ if word_to_id is None:
+ raise Exception("word_to_id : a dictionary")
# if isinstance(data[0], six.string_types):
# logging.info(type(data[0]))
# # exit()
@@ -832,13 +844,7 @@ def basic_tokenizer(sentence, _WORD_SPLIT=re.compile(b"([.,!?\"':;)(])")):
return [w for w in words if w]
-def create_vocabulary(vocabulary_path,
- data_path,
- max_vocabulary_size,
- tokenizer=None,
- normalize_digits=True,
- _DIGIT_RE=re.compile(br"\d"),
- _START_VOCAB=[b"_PAD", b"_GO", b"_EOS", b"_UNK"]):
+def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size, tokenizer=None, normalize_digits=True, _DIGIT_RE=re.compile(br"\d"), _START_VOCAB=None):
"""Create vocabulary file (if it does not exist yet) from data file.
Data file is assumed to contain one sentence per line. Each sentence is
@@ -859,12 +865,18 @@ def create_vocabulary(vocabulary_path,
A function to use to tokenize each data sentence. If None, basic_tokenizer will be used.
normalize_digits : boolean
If true, all digits are replaced by `0`.
+ _DIGIT_RE : compiled regular expression
+ Default is ``re.compile(br"\d")``.
+ _START_VOCAB : list of str
+ The pad, go, eos and unk tokens; default is ``[b"_PAD", b"_GO", b"_EOS", b"_UNK"]``.
References
----------
- Code from ``/tensorflow/models/rnn/translation/data_utils.py``
"""
+ if _START_VOCAB is None:
+ _START_VOCAB = [b"_PAD", b"_GO", b"_EOS", b"_UNK"]
if not gfile.Exists(vocabulary_path):
logging.info("Creating vocabulary %s from data %s" % (vocabulary_path, data_path))
vocab = {}
diff --git a/tensorlayer/prepro.py b/tensorlayer/prepro.py
index 68d85b8a5..133ed4a36 100644
--- a/tensorlayer/prepro.py
+++ b/tensorlayer/prepro.py
@@ -794,7 +794,7 @@ def elastic_transform(x, alpha, sigma, mode="constant", cval=0, is_random=False)
is_3d = True
elif len(x.shape) == 3 and x.shape[-1] != 1:
raise Exception("Only support greyscale image")
- assert len(x.shape) == 2
+ assert len(x.shape) == 2, "input should be grey-scale image"
shape = x.shape
@@ -843,7 +843,7 @@ def elastic_transform_multi(x, alpha, sigma, mode="constant", cval=0, is_random=
is_3d = True
elif len(data.shape) == 3 and data.shape[-1] != 1:
raise Exception("Only support greyscale image")
- assert len(data.shape) == 2
+ assert len(data.shape) == 2, "input should be grey-scale image"
dx = gaussian_filter((new_shape * 2 - 1), sigma, mode=mode, cval=cval) * alpha
dy = gaussian_filter((new_shape * 2 - 1), sigma, mode=mode, cval=cval) * alpha
@@ -1270,7 +1270,7 @@ def imresize(x, size=None, interp='bicubic', mode=None):
# value scale
-def pixel_value_scale(im, val=0.9, clip=[], is_random=False):
+def pixel_value_scale(im, val=0.9, clip=(-np.inf, np.inf), is_random=False):
"""Scales each value in the pixels of the image.
Parameters
@@ -1281,6 +1281,10 @@ def pixel_value_scale(im, val=0.9, clip=[], is_random=False):
The scale value for changing pixel value.
- If is_random=False, multiply this value with all pixels.
- If is_random=True, multiply a value between [1-val, 1+val] with all pixels.
+ clip : tuple of 2 numbers
+ The minimum and maximum value.
+ is_random : boolean
+ If True, see ``val``.
Returns
-------
@@ -1306,6 +1310,8 @@ def pixel_value_scale(im, val=0.9, clip=[], is_random=False):
if len(clip) == 2:
im = np.clip(im, clip[0], clip[1])
+ else:
+ raise Exception("clip : tuple of 2 numbers")
return im
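
Note: with the new default, clip is always a 2-tuple and np.clip is always
applied. A hypothetical call under the new signature:

    import numpy as np
    from tensorlayer.prepro import pixel_value_scale

    im = np.ones((8, 8)) * 100.0
    # multiply every pixel by a random factor in [0.1, 1.9], then clip to [0, 255]
    out = pixel_value_scale(im, val=0.9, clip=(0, 255), is_random=True)
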
@@ -1708,9 +1714,11 @@ def projective_transform_by_points(x, src, dst, map_args=None, output_shape=None
"""
if map_args is None:
map_args = {}
- if type(src) is list: # convert to numpy
+ # if type(src) is list:
+ if isinstance(src, list): # convert to numpy
src = np.array(src)
- if type(dst) is list:
+ # if type(dst) is list:
+ if isinstance(dst, list):
dst = np.array(dst)
if np.max(x) > 1: # convert to [0, 1]
x = x / 255
@@ -1792,13 +1800,13 @@ def find_contours(x, level=0.8, fully_connected='low', positive_orientation='low
return skimage.measure.find_contours(x, level, fully_connected=fully_connected, positive_orientation=positive_orientation)
-def pt2map(list_points=[], size=(100, 100), val=1):
+def pt2map(list_points=None, size=(100, 100), val=1):
"""Inputs a list of points, return a 2D image.
Parameters
--------------
list_points : list of 2 int
- [x, y] for point coordinates.
+ [[x, y], [x, y], ...] for point coordinates.
size : tuple of 2 int
(w, h) for output size.
val : float or int
@@ -1810,6 +1818,8 @@ def pt2map(list_points=[], size=(100, 100), val=1):
An image.
"""
+ if list_points is None:
+ raise Exception("list_points : list of 2 int")
i_m = np.zeros(size)
if len(list_points) == 0:
return i_m
@@ -2195,9 +2205,9 @@ def parse_darknet_ann_list_to_cls_box(annotations):
"""
class_list = []
bbox_list = []
- for i in range(len(annotations)):
- class_list.append(annotations[i][0])
- bbox_list.append(annotations[i][1:])
+ for ann in annotations:
+ class_list.append(ann[0])
+ bbox_list.append(ann[1:])
return class_list, bbox_list
diff --git a/tensorlayer/rein.py b/tensorlayer/rein.py
index bec9daf38..c5c2d2811 100644
--- a/tensorlayer/rein.py
+++ b/tensorlayer/rein.py
@@ -118,7 +118,7 @@ def log_weight(probs, weights, name='log_weight'):
return exp_v
-def choice_action_by_probs(probs=[0.5, 0.5], action_list=None):
+def choice_action_by_probs(probs=(0.5, 0.5), action_list=None):
"""Choice and return an an action by given the action probability distribution.
Parameters
diff --git a/tensorlayer/utils.py b/tensorlayer/utils.py
index a63ff6e57..1f1bb469b 100644
--- a/tensorlayer/utils.py
+++ b/tensorlayer/utils.py
@@ -491,12 +491,23 @@ def class_balancing_oversample(X_train=None, y_train=None, printable=True):
## Random
-def get_random_int(min=0, max=10, number=5, seed=None):
+def get_random_int(min_v=0, max_v=10, number=5, seed=None):
"""Return a list of random integer by the given range and quantity.
+ Parameters
+ -----------
+ min_v : number
+ The minimum value.
+ max_v : number
+ The maximum value.
+ number : int
Number of values.
+ seed : int or None
+ The seed for random.
+
Examples
---------
- >>> r = get_random_int(min=0, max=10, number=5)
+ >>> r = get_random_int(min_v=0, max_v=10, number=5)
... [10, 2, 3, 3, 7]
"""
@@ -504,7 +515,7 @@ def get_random_int(min=0, max=10, number=5, seed=None):
if seed:
rnd = random.Random(seed)
# return [random.randint(min,max) for p in range(0, number)]
- return [rnd.randint(min, max) for p in range(0, number)]
+ return [rnd.randint(min_v, max_v) for p in range(0, number)]
def list_string_to_dict(string):
@@ -528,7 +539,7 @@ def exit_tensorflow(sess=None, port=6006):
"""
text = "[TL] Close tensorboard and nvidia-process if available"
text2 = "[TL] Close tensorboard and nvidia-process not yet supported by this function (tl.ops.exit_tf) on "
- if sess != None:
+ if sess is not None:
sess.close()
# import time
# time.sleep(2)
@@ -542,8 +553,7 @@ def exit_tensorflow(sess=None, port=6006):
logging.info('OS X: %s' % text)
subprocess.Popen("lsof -i tcp:" + str(port) + " | grep -v PID | awk '{print $2}' | xargs kill", shell=True) # kill tensorboard
elif _platform == "win32":
- logging.info(text2 + "Windows")
- # TODO
+ raise NotImplementedError("this function is not supported on the Windows platform")
else:
logging.info(text2 + _platform)
@@ -566,16 +576,14 @@ def open_tensorboard(log_dir='/tmp/tensorflow', port=6006):
logging.info("[TL] Log reportory was created at %s" % log_dir)
if _platform == "linux" or _platform == "linux2":
- logging.info('linux %s' % text2)
- # TODO
+ raise NotImplementedError("this function is not supported on the Linux platform")
elif _platform == "darwin":
logging.info('OS X: %s' % text)
subprocess.Popen(
sys.prefix + " | python -m tensorflow.tensorboard --logdir=" + log_dir + " --port=" + str(port),
shell=True) # open tensorboard in localhost:6006/ or whatever port you chose
elif _platform == "win32":
- logging.info('Windows%s' % text2)
- # TODO
+ raise NotImplementedError("this function is not supported on the Windows platform")
else:
logging.info(_platform + text2)
diff --git a/tensorlayer/visualize.py b/tensorlayer/visualize.py
index 3cec6225b..f1edbbd65 100644
--- a/tensorlayer/visualize.py
+++ b/tensorlayer/visualize.py
@@ -289,7 +289,7 @@ def CNN2d(CNN=None, second=10, saveable=True, name='cnn', fig_idx=3119362):
for _ic in range(1, col + 1):
if count > n_mask:
break
- a = fig.add_subplot(col, row, count)
+ fig.add_subplot(col, row, count)
# logging.info(CNN[:,:,:,count-1].shape, n_row, n_col) # (5, 1, 32) 5 5
# exit()
# plt.imshow(
@@ -349,8 +349,8 @@ def images2d(images=None, second=10, saveable=True, name='images', dtype=None, f
plt.ion() # active mode
fig = plt.figure(fig_idx)
count = 1
- for ir in range(1, row + 1):
- for ic in range(1, col + 1):
+ for _ir in range(1, row + 1):
+ for _ic in range(1, col + 1):
if count > n_mask:
break
fig.add_subplot(col, row, count)
@@ -424,7 +424,6 @@ def plot_with_labels(low_dim_embs, labels, figsize=(18, 18), second=5, saveable=
try:
from sklearn.manifold import TSNE
- import matplotlib.pyplot as plt
from six.moves import xrange
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
diff --git a/tests/test_pydocstyle.py b/tests/test_pydocstyle.py
index 96bba16c1..3dcb71329 100644
--- a/tests/test_pydocstyle.py
+++ b/tests/test_pydocstyle.py
@@ -1,5 +1,3 @@
-import os
-
from pydocstyle.checker import check
from pydocstyle.checker import violations
diff --git a/tests/test_yapf_format.py b/tests/test_yapf_format.py
index e98d36d23..e65675cef 100644
--- a/tests/test_yapf_format.py
+++ b/tests/test_yapf_format.py
@@ -1,8 +1,6 @@
-import os
import sys
-
-from yapf.yapflib.yapf_api import FormatCode
import testing
+from yapf.yapflib.yapf_api import FormatCode
def _read_utf_8_file(filename):