[Archive - Stale] - Documentation Fix to allow unittest to properly run #509
Changes from all commits
The first file adds two local helper scripts to the ignore list:

```diff
@@ -12,3 +12,5 @@ tensorlayer.egg-info
 tensorlayer/__pacache__
 venv/
 .pytest_cache/
+update_tl.bat
+update_tl.py
```
The second file replaces the four-line yapf style configuration with a fully commented one:

```diff
@@ -1,4 +1,60 @@
 [style]
-based_on_style = pep8
+based_on_style=google
+
+# The number of columns to use for indentation.
 indent_width = 4
-column_limit = 160
+
+# The column limit.
+column_limit=120
+
+# Place each dictionary entry onto its own line.
+each_dict_entry_on_separate_line = True
+
+# Put closing brackets on a separate line, dedented, if the bracketed
+# expression can't fit in a single line. Applies to all kinds of brackets,
+# including function definitions and calls. For example:
+#
+#   config = {
+#       'key1': 'value1',
+#       'key2': 'value2',
+#   }  # <--- this bracket is dedented and on a separate line
+#
+#   time_series = self.remote_client.query_entity_counters(
+#       entity='dev3246.region1',
+#       key='dns.query_latency_tcp',
+#       transform=Transformation.AVERAGE(window=timedelta(seconds=60)),
+#       start_ts=now()-timedelta(days=3),
+#       end_ts=now(),
+#   )  # <--- this bracket is dedented and on a separate line
+dedent_closing_brackets=True
+
+# Do not split consecutive brackets. Only relevant when DEDENT_CLOSING_BRACKETS is set.
+coalesce_brackets = False
+
+# Align closing bracket with visual indentation.
+align_closing_bracket_with_visual_indent = False
+
+# Split named assignments onto individual lines.
+split_before_named_assigns = False
+
+# If an argument / parameter list is going to be split, then split before the first argument.
+split_before_first_argument = True
+
+# Split before arguments if the argument list is terminated by a comma.
+split_arguments_when_comma_terminated = False
+
+# Insert a space between the ending comma and closing bracket of a list, etc.
+space_between_ending_comma_and_closing_bracket = True
+
+# Join short lines into one line. E.g., single line if statements.
+join_multiple_lines = True
+
+# Do not include spaces around selected binary operators.
+# Example: 1 + 2 * 3 - 4 / 5  =>  1 + 2*3 - 4/5
+no_spaces_around_selected_binary_operators = True
+
+# Allow lambdas to be formatted on more than one line.
+allow_multiline_lambdas = True
+
+SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT = 10
+SPLIT_PENALTY_AFTER_OPENING_BRACKET = 500
```
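The two most consequential settings here are `dedent_closing_brackets` and the lowered `column_limit`; they produce the bracket style seen throughout the diffs below. As a minimal sketch of checking what this style does, assuming yapf is installed and that its `FormatCode` API accepts an inline `{key: value}` style string and returns a `(code, changed)` tuple (true of recent yapf releases):

```python
# Run one over-long line through yapf with a subset of the settings above.
# Equivalent CLI: yapf --style='{based_on_style: google, column_limit: 120, ...}' file.py
from yapf.yapflib.yapf_api import FormatCode

SOURCE = (
    "net = DenseLayer(net, n_units=384, act=tf.nn.relu, W_init=W_init2, "
    "b_init=b_init2, name='d1relu')  # output: (batch_size, 384)\n"
)

formatted, _changed = FormatCode(
    SOURCE,
    style_config='{based_on_style: google, column_limit: 120, '
    'dedent_closing_brackets: true, split_before_first_argument: true}',
)
print(formatted)  # the call should come back split, closing bracket dedented on its own line
```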
The remaining changes reformat two CIFAR-10 tutorial scripts to match the new style. In the TFRecord tutorial, the nested `tf.train.Example` construction gets dedented closing brackets:

```diff
@@ -83,7 +83,9 @@ def data_to_tfrecord(images, labels, filename):
             feature={
                 "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
                 'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
-            }))
+            }
+        )
+    )
     writer.write(example.SerializeToString())  # Serialize To String
     writer.close()
```

> **Review comment** (on the expanded closing brackets): need shorten~
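For readers who want the write-side pattern in isolation, here is a self-contained sketch, assuming TensorFlow 1.x (the `tf.python_io.TFRecordWriter` / `tf.train.Example` API this tutorial targets); the toy images and labels are made up for illustration.

```python
# Minimal TFRecord write sketch mirroring data_to_tfrecord (TF 1.x APIs).
import numpy as np
import tensorflow as tf  # assumed to be TensorFlow 1.x

images = np.random.rand(4, 32, 32, 3).astype(np.float32)  # toy stand-in data
labels = np.array([0, 1, 2, 3], dtype=np.int64)

writer = tf.python_io.TFRecordWriter("toy.tfrecord")
for img, label in zip(images, labels):
    img_raw = img.tobytes()  # serialize the array to raw bytes
    example = tf.train.Example(
        features=tf.train.Features(
            feature={
                "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[int(label)])),
                'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
            }
        )
    )
    writer.write(example.SerializeToString())  # one serialized Example per record
writer.close()
```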
In `read_and_decode`, the `tf.parse_single_example` call gets the same bracket treatment (note that the reformat also drops a space in the `is_train` comparison):

```diff
@@ -97,12 +99,13 @@ def read_and_decode(filename, is_train=None):
         serialized_example, features={
             'label': tf.FixedLenFeature([], tf.int64),
             'img_raw': tf.FixedLenFeature([], tf.string),
-        })
+        }
+    )
     # You can do more image distortion here for training data
     img = tf.decode_raw(features['img_raw'], tf.float32)
     img = tf.reshape(img, [32, 32, 3])
     # img = tf.cast(img, tf.float32) #* (1. / 255) - 0.5
-    if is_train == True:
+    if is_train ==True:
         # 1. Randomly crop a [height, width] section of the image.
         img = tf.random_crop(img, [24, 24, 3])
         # 2. Randomly flip the image horizontally.
```
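The matching read side, again as a hedged TF 1.x sketch; it pairs with the write sketch above and assumes the same record layout.

```python
# Minimal TFRecord read/decode sketch mirroring read_and_decode (TF 1.x APIs).
import tensorflow as tf  # assumed to be TensorFlow 1.x

filename_queue = tf.train.string_input_producer(["toy.tfrecord"])
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
    serialized_example, features={
        'label': tf.FixedLenFeature([], tf.int64),
        'img_raw': tf.FixedLenFeature([], tf.string),
    }
)
img = tf.decode_raw(features['img_raw'], tf.float32)  # raw bytes -> float32 vector
img = tf.reshape(img, [32, 32, 3])                    # restore the image shape
label = tf.cast(features['label'], tf.int32)
```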
The batching calls are split the same way:

```diff
@@ -147,9 +150,12 @@ def read_and_decode(filename, is_train=None):
     x_test_, y_test_ = read_and_decode("test.cifar10", False)

     x_train_batch, y_train_batch = tf.train.shuffle_batch(
-        [x_train_, y_train_], batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32)  # set the number of threads here
+        [x_train_, y_train_], batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32
+    )  # set the number of threads here
     # for testing, uses batch instead of shuffle_batch
-    x_test_batch, y_test_batch = tf.train.batch([x_test_, y_test_], batch_size=batch_size, capacity=50000, num_threads=32)
+    x_test_batch, y_test_batch = tf.train.batch(
+        [x_test_, y_test_], batch_size=batch_size, capacity=50000, num_threads=32
+    )

     def model(x_crop, y_, reuse):
         """ For more simplified CNN APIs, check tensorlayer.org """
```

> **Review comments** (one on each reformatted batch call): need shorten~
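One thing the diff itself doesn't show: these batch tensors only yield data once queue runners are started. A minimal TF 1.x sketch of the surrounding session boilerplate, reusing `img` and `label` from the read sketch above (the small capacity numbers are illustrative):

```python
# Continues the read sketch: img, label are the decoded per-example tensors.
import tensorflow as tf

x_batch, y_batch = tf.train.shuffle_batch(
    [img, label], batch_size=4, capacity=50, min_after_dequeue=10, num_threads=2
)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)  # start the filler threads
    imgs, labels = sess.run([x_batch, y_batch])  # pulls one shuffled batch
    coord.request_stop()
    coord.join(threads)  # clean shutdown of the queue threads
```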
Inside the binary-net model definition, every call that overflowed the old line length is expanded:

```diff
@@ -161,16 +167,28 @@ def model(x_crop, y_, reuse):
         net = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn1')
         net = tl.layers.SignLayer(net)
         net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')
-        net = tl.layers.LocalResponseNormLayer(net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
-        net = tl.layers.BinaryConv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn2')
-        net = tl.layers.LocalResponseNormLayer(net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
+        net = tl.layers.LocalResponseNormLayer(
+            net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1'
+        )
+        net = tl.layers.BinaryConv2d(
+            net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn2'
+        )
+        net = tl.layers.LocalResponseNormLayer(
+            net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2'
+        )
         net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')
         net = tl.layers.FlattenLayer(net, name='flatten')  # output: (batch_size, 2304)
         net = tl.layers.SignLayer(net)
-        net = tl.layers.BinaryDenseLayer(net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')  # output: (batch_size, 384)
+        net = tl.layers.BinaryDenseLayer(
+            net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu'
+        )  # output: (batch_size, 384)
         net = tl.layers.SignLayer(net)
-        net = tl.layers.BinaryDenseLayer(net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')  # output: (batch_size, 192)
-        net = tl.layers.DenseLayer(net, n_units=10, act=tf.identity, W_init=W_init2, name='output')  # output: (batch_size, 10)
+        net = tl.layers.BinaryDenseLayer(
+            net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu'
+        )  # output: (batch_size, 192)
+        net = tl.layers.DenseLayer(
+            net, n_units=10, act=tf.identity, W_init=W_init2, name='output'
+        )  # output: (batch_size, 10)
         y = net.outputs

         ce = tl.cost.cross_entropy(y, y_, name='cost')
```

> **Review comments** (on the reformatted layer calls): "2 lines"; "need shorten~"; "2 lines"; "2 lines"
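The `reuse` argument matters because the tutorial builds this model twice, once for the training batch and once for the test batch, over a single set of weights. A hedged sketch of that pattern, assuming TF 1.x and a TensorLayer 1.x release of the same era as this PR (the tiny model and names are illustrative, not the tutorial's):

```python
# Sketch of the weight-sharing pattern behind model(x_crop, y_, reuse):
# enter the same variable scope twice, the second time with reuse=True,
# so the train and eval graphs share one set of variables.
import tensorflow as tf
import tensorlayer as tl

def tiny_model(x, reuse):
    with tf.variable_scope("model", reuse=reuse):
        net = tl.layers.InputLayer(x, name='input')
        net = tl.layers.DenseLayer(net, n_units=10, act=tf.identity, name='output')
    return net

x_train = tf.placeholder(tf.float32, shape=[None, 784], name='x_train')
x_eval = tf.placeholder(tf.float32, shape=[None, 784], name='x_eval')
net_train = tiny_model(x_train, reuse=False)  # creates the variables
net_eval = tiny_model(x_eval, reuse=True)     # reuses the same variables
```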
`model_batch_norm` gets the same expansion:

```diff
@@ -201,9 +219,15 @@ def model_batch_norm(x_crop, y_, reuse, is_train):
         net = tl.layers.BatchNormLayer(net, is_train, act=tf.nn.relu, name='batch2')
         net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')
         net = tl.layers.FlattenLayer(net, name='flatten')  # output: (batch_size, 2304)
-        net = tl.layers.DenseLayer(net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')  # output: (batch_size, 384)
-        net = tl.layers.DenseLayer(net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')  # output: (batch_size, 192)
-        net = tl.layers.DenseLayer(net, n_units=10, act=tf.identity, W_init=W_init2, name='output')  # output: (batch_size, 10)
+        net = tl.layers.DenseLayer(
+            net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu'
+        )  # output: (batch_size, 384)
+        net = tl.layers.DenseLayer(
+            net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu'
+        )  # output: (batch_size, 192)
+        net = tl.layers.DenseLayer(
+            net, n_units=10, act=tf.identity, W_init=W_init2, name='output'
+        )  # output: (batch_size, 10)
         y = net.outputs

         ce = tl.cost.cross_entropy(y, y_, name='cost')
```

> **Review comment** (on the first reformatted call): 2 lines
So does the epoch logging in the training loop:

```diff
@@ -273,7 +297,10 @@ def model_batch_norm(x_crop, y_, reuse, is_train):
                 n_batch += 1

             if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
-                print("Epoch %d : Step %d-%d of %d took %fs" % (epoch, step, step + n_step_epoch, n_step, time.time() - start_time))
+                print(
+                    "Epoch %d : Step %d-%d of %d took %fs" %
+                    (epoch, step, step + n_step_epoch, n_step, time.time() - start_time)
+                )
                 print("   train loss: %f" % (train_loss / n_batch))
                 print("   train acc: %f" % (train_acc / n_batch))
```

> **Review comment** (on the expanded print): 2 lines
The second CIFAR-10 tutorial script gets the same treatment. In `model`:

```diff
@@ -44,9 +44,15 @@ def model(x, y_, reuse):
         # net = PoolLayer(net, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
         #     padding='SAME', pool = tf.nn.max_pool, name ='pool2')  # output: (batch_size, 6, 6, 64)
         net = FlattenLayer(net, name='flatten')  # output: (batch_size, 2304)
-        net = DenseLayer(net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')  # output: (batch_size, 384)
-        net = DenseLayer(net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')  # output: (batch_size, 192)
-        net = DenseLayer(net, n_units=10, act=tf.identity, W_init=tf.truncated_normal_initializer(stddev=1 / 192.0), name='output')  # output: (batch_size, 10)
+        net = DenseLayer(
+            net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu'
+        )  # output: (batch_size, 384)
+        net = DenseLayer(
+            net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu'
+        )  # output: (batch_size, 192)
+        net = DenseLayer(
+            net, n_units=10, act=tf.identity, W_init=tf.truncated_normal_initializer(stddev=1 / 192.0), name='output'
+        )  # output: (batch_size, 10)
         y = net.outputs

         ce = tl.cost.cross_entropy(y, y_, name='cost')
```

> **Review comment** (on the reformatted calls): need arrange
And in its `model_batch_norm`:

```diff
@@ -89,9 +95,15 @@ def model_batch_norm(x, y_, reuse, is_train):
         #     padding='SAME', pool = tf.nn.max_pool, name ='pool2')  # output: (batch_size, 6, 6, 64)

         net = FlattenLayer(net, name='flatten')  # output: (batch_size, 2304)
-        net = DenseLayer(net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')  # output: (batch_size, 384)
-        net = DenseLayer(net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')  # output: (batch_size, 192)
-        net = DenseLayer(net, n_units=10, act=tf.identity, W_init=tf.truncated_normal_initializer(stddev=1 / 192.0), name='output')  # output: (batch_size, 10)
+        net = DenseLayer(
+            net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu'
+        )  # output: (batch_size, 384)
+        net = DenseLayer(
+            net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu'
+        )  # output: (batch_size, 192)
+        net = DenseLayer(
+            net, n_units=10, act=tf.identity, W_init=tf.truncated_normal_initializer(stddev=1 / 192.0), name='output'
+        )  # output: (batch_size, 10)
         y = net.outputs

         ce = tl.cost.cross_entropy(y, y_, name='cost')
```

> **Review comment** (on the reformatted calls): need arrange
The placeholder definitions are tidied: a named `dtype` argument, and `shape=[None]` instead of the multi-line trailing-comma list:

```diff
@@ -136,11 +148,8 @@ def distort_fn(x, is_train=False):
     return x


-x = tf.placeholder(tf.float32, shape=[None, 24, 24, 3], name='x')
-y_ = tf.placeholder(
-    tf.int64, shape=[
-        None,
-    ], name='y_')
+x = tf.placeholder(dtype=tf.float32, shape=[None, 24, 24, 3], name='x')
+y_ = tf.placeholder(dtype=tf.int64, shape=[None], name='y_')

 ## using local response normalization
 # network, cost, _ = model(x, y_, False)
```
Finally, the optimizer line is wrapped at the argument list:

```diff
@@ -156,7 +165,8 @@ def distort_fn(x, is_train=False):
     batch_size = 128

     train_params = network.all_params
-    train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False).minimize(cost, var_list=train_params)
+    train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08,
+                                      use_locking=False).minimize(cost, var_list=train_params)

     tl.layers.initialize_global_variables(sess)
```
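The `var_list=train_params` argument is what restricts updates to the network's own parameters. A minimal TF 1.x sketch of that pattern, with a made-up stand-in loss:

```python
# Sketch: minimizing only a chosen variable list with AdamOptimizer (TF 1.x).
import tensorflow as tf

w = tf.Variable(1.0, name='w')        # will be trained
b = tf.Variable(0.0, name='b')        # deliberately left out of var_list
cost = tf.square(w * 3.0 + b - 1.0)   # stand-in loss

train_op = tf.train.AdamOptimizer(
    learning_rate=0.0001, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False
).minimize(cost, var_list=[w])        # only `w` receives gradient updates

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)
    print(sess.run([w, b]))  # w has moved, b is unchanged
```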