v3_graph_batch_norm.py
from __future__ import division
from __future__ import print_function

import argparse
import glob
import os
import shutil
import sys
import time

import numpy as np
import pandas as pd
import tensorflow as tf
from six.moves import xrange
from tensorflow.contrib.layers.python.layers import batch_norm
def get_path(directory):
    """Returns sorted absolute paths of the images, masks and manual
    segmentations found under the given DRIVE data directory."""
    imgs = sorted(glob.glob(directory + '/images/*.tif'))
    mask = sorted(glob.glob(directory + '/mask/*.gif'))
    gt = sorted(glob.glob(directory + '/1st_manual/*.gif'))
    return (list(map(os.path.abspath, imgs)),
            list(map(os.path.abspath, mask)),
            list(map(os.path.abspath, gt)))
data = None
mean_img = None
# Hyper Params
TOTAL_PATCHES = None
NUM_IMAGES = None
PATCHES_PER_IMAGE = None
PATCH_DIM = None
BATCH_SIZE = 64
LEARNING_RATE = 5e-4
TRAINING_PROP = 0.8
MAX_STEPS = 125
CKPT_STEP = 40
LOSS_STEP = 2
KEEP_PROB = 0.5
NUM_CLASSES = 2
FCHU1 = 512 # Fully connected layer 1 hidden units
MODEL_NAME = '1'
def next_batch(size, df, current_batch_ind):
"""Returns the next mini batch of data from the dataset passed
Args:
size: length of the current requested mini batch
df: the data set consisting of the images and the labels
current_batch_ind: the current position of the index in the dataset
Returns:
(batch_x, batch_y): A tuple of np arrays of dimensions
[size, patch_dim**2*3] and [size, NUM_CLASSES] respectively
current_batch_ind: the updated current position of the index in the dataset
df: when the requested batch_size+current_batch_ind is more than the length of the data set,
the data is shuffled again and current_batch_ind is reset to 0, and this new data set is
returned
"""
    if current_batch_ind + size > len(df):
        current_batch_ind = 0
        df = df.iloc[np.random.permutation(len(df))]
        df = df.reset_index(drop=True)
    batch_x = np.zeros((size, PATCH_DIM**2*3))
    batch_y = np.zeros((size, NUM_CLASSES), dtype='uint8')
for i in range(current_batch_ind, current_batch_ind+size):
batch_x[i - current_batch_ind] = df.loc[i][:-1]
batch_y[i - current_batch_ind][int(df.loc[i][PATCH_DIM**2*3])]=1
current_batch_ind += size
return (batch_x, batch_y), current_batch_ind, df
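# Illustrative sketch (not called by the training script): how `next_batch` is
# meant to be driven. `_example_next_batch_usage` is a hypothetical helper; it
# assumes `df` is the flattened-patch DataFrame loaded in main(), i.e.
# PATCH_DIM**2*3 feature columns followed by one label column, with PATCH_DIM
# already set.
def _example_next_batch_usage(df):
    ind = 0
    (batch_x, batch_y), ind, df = next_batch(BATCH_SIZE, df, ind)
    # batch_x: [BATCH_SIZE, PATCH_DIM**2*3] pixel features
    # batch_y: [BATCH_SIZE, NUM_CLASSES] one-hot labels
    return batch_x, batch_y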
def variable_summaries(var, name):
"""Attach a lot of summaries to a Tensor."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.scalar_summary('mean/' + name, mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.scalar_summary('stddev/' + name, stddev)
tf.scalar_summary('max/' + name, tf.reduce_max(var))
tf.scalar_summary('min/' + name, tf.reduce_min(var))
tf.histogram_summary(name, var)
def batch_norm_layer(x,train_phase,scope_bn):
"""Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Can be used as a normalizer function for conv2d and fully_connected.
"""
    # First call creates the BN variables inside `scope_bn` (reuse=None); it
    # normalises with the statistics of the current mini-batch and updates the
    # moving averages.
    bn_train = batch_norm(x, decay=0.999, center=True, scale=True, updates_collections=None,
                          is_training=True, reuse=None,
                          trainable=True, scope=scope_bn)
    # Second call reuses the same variables (reuse=True) and normalises with
    # the accumulated moving mean/variance instead of the batch statistics.
    bn_inference = batch_norm(x, decay=0.999, center=True, scale=True, updates_collections=None,
                              is_training=False, reuse=True,
                              trainable=True, scope=scope_bn)
    # Select the appropriate branch at run time from the boolean placeholder.
    z = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)
    return z
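# For reference, both branches above compute the batch-norm transform from the
# Ioffe & Szegedy paper:  y = gamma * (x - mu) / sqrt(var + eps) + beta.
# In the training branch mu and var are the mini-batch statistics (and the
# moving averages are updated with decay 0.999); in the inference branch the
# accumulated moving_mean / moving_variance are used instead.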
def inference(images, keep_prob, fc_hidden_units1, train_phase):
""" Builds the model as far as is required for running the network
forward to make predictions.
Args:
images: Images placeholder, from inputs().
keep_prob: Probability used for Dropout in the final Affine Layer
fc_hidden_units1: Number of hidden neurons in final Affine layer
train_phase: Whether or not the layer is in training mode. In training mode
the batch_norm layers would accumulate the statistics of the moments into `moving_mean` and
`moving_variance` using an exponential moving average with the given
`decay`. When it is not in training mode then it would use the values of
the `moving_mean` and the `moving_variance`.
Returns:
softmax_linear: Output tensor with the computed logits.
"""
with tf.variable_scope('h_conv1') as scope:
weights = tf.get_variable('weights', shape=[4, 4, 3, 64],
initializer=tf.contrib.layers.xavier_initializer_conv2d())
biases = tf.get_variable('biases', shape=[64], initializer=tf.constant_initializer(0.05))
variable_summaries(weights, scope.name+'/weights')
variable_summaries(biases, scope.name+'/biases')
        # Reshape each flattened input vector back into a [PATCH_DIM, PATCH_DIM, 3] image
        x_image = tf.reshape(images, [-1,PATCH_DIM,PATCH_DIM,3])
x_image_bn = batch_norm_layer(x_image, train_phase, scope.name)
tf.image_summary('input', x_image_bn, 10)
z = tf.nn.conv2d(x_image_bn, weights, strides=[1, 1, 1, 1], padding='VALID') + biases
tf.histogram_summary(scope.name + '/pre_activations', z)
h_conv1 = tf.nn.relu(z, name=scope.name)
tf.histogram_summary(scope.name + '/activations', h_conv1)
with tf.variable_scope('h_conv2') as scope:
weights = tf.get_variable('weights', shape=[4, 4, 64, 64],
initializer=tf.contrib.layers.xavier_initializer_conv2d())
biases = tf.get_variable('biases', shape=[64], initializer=tf.constant_initializer(0.05))
variable_summaries(weights, scope.name+'/weights')
variable_summaries(biases, scope.name+'/biases')
h_conv1_bn = batch_norm_layer(h_conv1, train_phase, scope.name)
z = tf.nn.conv2d(h_conv1_bn, weights, strides=[1, 1, 1, 1], padding='SAME')+biases
tf.histogram_summary(scope.name + '/pre_activations', z)
h_conv2 = tf.nn.relu(z, name=scope.name)
tf.histogram_summary(scope.name + '/activations', h_conv2)
h_pool1 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME', name='h_pool1')
tf.histogram_summary('h_pool1/activations', h_pool1)
with tf.variable_scope('h_conv3') as scope:
weights = tf.get_variable('weights', shape=[4, 4, 64, 64],
initializer=tf.contrib.layers.xavier_initializer_conv2d())
biases = tf.get_variable('biases', shape=[64], initializer=tf.constant_initializer(0.05))
variable_summaries(weights, scope.name+'/weights')
variable_summaries(biases, scope.name+'/biases')
h_pool1_bn = batch_norm_layer(h_pool1, train_phase, scope.name)
z = tf.nn.conv2d(h_pool1_bn, weights, strides=[1, 1, 1, 1], padding='SAME')+biases
tf.histogram_summary(scope.name + '/pre_activations', z)
h_conv3 = tf.nn.relu(z, name=scope.name)
tf.histogram_summary(scope.name + '/activations', h_conv3)
h_pool2 = tf.nn.max_pool(h_conv3, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME', name='h_pool2')
tf.histogram_summary('h_pool2/activations', h_pool2)
    with tf.variable_scope('h_fc1') as scope:
        # 7*7*64 assumes a 31x31 input patch: 31 -> 28 (4x4 VALID conv) ->
        # 28 (SAME conv) -> 14 (2x2 pool) -> 14 (SAME conv) -> 7 (2x2 pool).
        weights = tf.get_variable('weights', shape=[7**2*64, fc_hidden_units1],
                                  initializer=tf.contrib.layers.xavier_initializer())
biases = tf.get_variable('biases', shape=[fc_hidden_units1], initializer=tf.constant_initializer(0.05))
variable_summaries(weights, scope.name+'/weights')
variable_summaries(biases, scope.name+'/biases')
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
#h_pool2_flat_bn = batch_norm_layer(h_pool2_flat, train_phase, scope.name)
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, weights) + biases, name = 'h_fc1')
tf.histogram_summary(scope.name + '/activations', h_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
tf.histogram_summary(scope.name + '/dropout_activations', h_fc1_drop)
with tf.variable_scope('h_fc2') as scope:
weights = tf.get_variable('weights', shape=[fc_hidden_units1, NUM_CLASSES],
initializer=tf.contrib.layers.xavier_initializer())
biases = tf.get_variable('biases', shape=[NUM_CLASSES])
variable_summaries(weights, scope.name+'/weights')
variable_summaries(biases, scope.name+'/biases')
#h_fc1_drop_bn = batch_norm_layer(h_fc1, train_phase, scope.name)
logits = (tf.matmul(h_fc1_drop, weights) + biases)
tf.histogram_summary(scope.name + '/activations', logits)
return logits
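# Illustrative sketch (not used by the training script): `_flattened_conv_output_size`
# is a hypothetical helper that recomputes the spatial size produced by the
# conv/pool stack in `inference`, so the hard-coded 7*7*64 input size of
# 'h_fc1' can be checked for a given PATCH_DIM.
def _flattened_conv_output_size(patch_dim, depth=64):
    size = patch_dim - 4 + 1    # h_conv1: 4x4 filter, VALID padding, stride 1
    # h_conv2 and h_conv3 use SAME padding, so they do not change the size.
    size = (size + 1) // 2      # h_pool1: 2x2 max pool, stride 2, SAME padding
    size = (size + 1) // 2      # h_pool2: 2x2 max pool, stride 2, SAME padding
    return size * size * depth  # e.g. patch_dim=31 -> 7*7*64 = 3136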
def calc_loss(logits, labels):
"""Calculates the loss from the logits and the labels.
Args:
logits: Logits tensor, float - [batch_size, NUM_CLASSES].
labels: Labels tensor, int32 - [batch_size].
Returns:
loss: Loss tensor of type float.
"""
labels = tf.to_float(labels)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, labels, name='xentropy')
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
tf.scalar_summary('cross_entropy_loss', loss)
return loss
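# For reference, the per-example loss above is the softmax cross-entropy
#   L = -sum_c y_c * log(softmax(logits)_c),
# which for one-hot labels reduces to -log p(true class); calc_loss then
# averages it over the mini-batch.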
def training(loss, learning_rate=5e-4):
"""Sets up the training Ops.
Creates a summarizer to track the loss over time in TensorBoard.
Creates an optimizer and applies the gradients to all trainable variables.
The Op returned by this function is what must be passed to the
`sess.run()` call to cause the model to train.
Args:
loss: Loss tensor, from loss().
learning_rate: The learning rate to use for gradient descent.
Returns:
train_op: The Op for training.
"""
# Add a scalar summary for the snapshot loss.
tf.scalar_summary(loss.op.name, loss)
# Create the Adam optimizer with the given learning rate.
optimizer = tf.train.AdamOptimizer(learning_rate)
# Create a variable to track the global step.
global_step = tf.Variable(0, name='global_step', trainable=False)
# Use the optimizer to apply the gradients that minimize the loss
# (and also increment the global step counter) as a single training step.
train_op = optimizer.minimize(loss, global_step=global_step)
return train_op
def evaluation(logits, labels, topk=1):
"""Evaluate the quality of the logits at predicting the label.
Args:
logits: Logits tensor, float - [batch_size, NUM_CLASSES].
labels: Labels tensor, int32 - [batch_size], with values in the
range [0, NUM_CLASSES).
topk: the number k for 'top-k accuracy'
Returns:
A scalar int32 tensor with the number of examples (out of batch_size)
that were predicted correctly.
"""
# For a classifier model, we can use the in_top_k Op.
# It returns a bool tensor with shape [batch_size] that is true for
# the examples where the label is in the top k (here k=1)
# of all logits for that example.
correct = tf.nn.in_top_k(logits, tf.reshape(tf.slice(labels, [0,1], [int(labels.get_shape()[0]), 1]),[-1]), topk)
# Return the number of true entries.
accurate = tf.reduce_sum(tf.cast(correct, tf.int32))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
tf.scalar_summary('accuracy', accuracy)
return accurate
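# Illustrative sketch (not used by the training script): with NUM_CLASSES = 2
# the one-hot labels are [1, 0] or [0, 1], so the tf.slice/tf.reshape above is
# simply "take column 1", which recovers the integer class index that
# tf.nn.in_top_k expects. `_example_onehot_to_index` is a hypothetical NumPy
# equivalent.
def _example_onehot_to_index():
    labels = np.array([[1, 0], [0, 1], [0, 1]], dtype=np.int32)
    return labels[:, 1]  # -> array([0, 1, 1]): class index per example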
def placeholder_inputs(batch_size):
"""Generate placeholder variables to represent the input tensors.
These placeholders are used as inputs by the rest of the model building
code and will be fed from the downloaded data in the .run() loop, below.
Args:
batch_size: The batch size will be baked into both placeholders.
Returns:
images_placeholder: Images placeholder.
labels_placeholder: Labels placeholder.
"""
# Note that the shapes of the placeholders match the shapes of the full
# image and label tensors, except the first dimension is now batch_size
# rather than the full size of the train or test data sets.
images_placeholder = tf.placeholder(tf.float32, shape=(batch_size, PATCH_DIM**2*3))
labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size, NUM_CLASSES))
return images_placeholder, labels_placeholder
#UPDATE current_img_ind
def fill_feed_dict(data_set, images_pl, labels_pl, current_img_ind, batch_size, keep_prob, train_phase_pl, is_training):
"""Fills the feed_dict for training the given step.
A feed_dict takes the form of:
feed_dict = {
<placeholder>: <tensor of values to be passed for placeholder>,
....
}
Args:
data_set: The set of images and labels, from input_data.read_data_sets()
images_pl: The images placeholder, from placeholder_inputs().
labels_pl: The labels placeholder, from placeholder_inputs().
current_img_ind: The current position of the index in the dataset
keep_prob: Placeholder for dropout's keep_probability
train_phase_pl: Placeholder of Bool type to indicate if training is currently under progress
is_training: Bool value to indicate if training is currently under progress
Returns:
feed_dict: The feed dictionary mapping from placeholders to values.
current_img_ind: The updated position of the index in the dataset
data_set: updated data_set
"""
# Create the feed_dict for the placeholders filled with the next
# `batch size ` examples.
    batch, current_img_ind, data_set = next_batch(batch_size, data_set, current_img_ind)
feed_dict = {
images_pl: batch[0],
labels_pl: batch[1],
keep_prob: KEEP_PROB,
train_phase_pl: is_training
}
return feed_dict, current_img_ind, data_set
def do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_set, batch_size, keep_prob, train_phase_pl, is_training):
"""Runs one evaluation against the full epoch of data.
Args:
sess: The session in which the model has been trained.
eval_correct: The Tensor that returns the number of correct predictions.
images_placeholder: The images placeholder.
labels_placeholder: The labels placeholder.
data_set: The set of images and labels to evaluate, from
input_data.read_data_sets().
keep_prob: Placeholder for dropout's keep_probability
train_phase_pl: Placeholder of Bool type to indicate if training is currently under progress
is_training: Bool value to indicate if training is currently under progress
    Returns:
        precision: Accuracy of one evaluation over the epoch of data.
"""
# And run one epoch of eval.
true_count = 0 # Counts the number of correct predictions.
steps_per_epoch = len(data_set) // batch_size
num_examples = steps_per_epoch * batch_size
current_img_ind = 0
for step in xrange(steps_per_epoch):
feed_dict, current_img_ind, data_set = fill_feed_dict(data_set, images_placeholder,
labels_placeholder, current_img_ind, batch_size, keep_prob, train_phase_pl, is_training)
        feed_dict[keep_prob] = 1.0  # disable dropout during evaluation
true_count += sess.run(eval_correct, feed_dict=feed_dict)
precision = true_count / num_examples
print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %
(num_examples, true_count, precision))
return precision
def finish_parsing():
global BATCH_SIZE, LEARNING_RATE, TRAINING_PROP, MAX_STEPS, CKPT_STEP, LOSS_STEP, FCHU1, KEEP_PROB, MODEL_NAME
parser = argparse.ArgumentParser(description=
'Training script')
parser.add_argument("--batch", type=int,
help="Batch Size [Default - 64]")
parser.add_argument("--fchu1", type=int,
help="Number of hidden units in FC1 layer [Default - 512]")
parser.add_argument("--learning_rate", type=float,
help="Learning rate for optimiser [Default - 5e-4]")
parser.add_argument("--training_prop", type=float,
help="Proportion of data to be used for training data [Default - 0.8]")
    parser.add_argument("--max_steps", type=int,
                        help="Maximum number of iterations for which the program must run [Default - 125]")
    parser.add_argument("--checkpoint_step", type=int,
                        help="Step after which an evaluation is carried out on the validation set and the model is saved [Default - 40]")
    parser.add_argument("--loss_step", type=int,
                        help="Step after which loss is printed [Default - 2]")
parser.add_argument("--keep_prob", type=float,
help="Keep Probability for dropout layer [Default - 0.5]")
parser.add_argument("--model_name",
help="Index of the model [Default - '1']")
args = parser.parse_args()
    if args.batch is not None:
        BATCH_SIZE = args.batch
        print("New BATCH_SIZE = %d" % BATCH_SIZE)
    if args.model_name is not None:
        MODEL_NAME = args.model_name
        print("New MODEL_NAME = %s" % MODEL_NAME)
    if args.fchu1 is not None:
        FCHU1 = args.fchu1
        print("New FCHU1 = %d" % FCHU1)
    if args.learning_rate is not None:
        LEARNING_RATE = args.learning_rate
        print("New LEARNING_RATE = %.5f" % LEARNING_RATE)
    if args.training_prop is not None:
        TRAINING_PROP = args.training_prop
        print("New TRAINING_PROP = %.2f" % TRAINING_PROP)
    if args.max_steps is not None:
        MAX_STEPS = args.max_steps
        print("New MAX_STEPS = %d" % MAX_STEPS)
    if args.checkpoint_step is not None:
        CKPT_STEP = args.checkpoint_step
        print("New CKPT_STEP = %d" % CKPT_STEP)
    if args.loss_step is not None:
        LOSS_STEP = args.loss_step
        print("New LOSS_STEP = %d" % LOSS_STEP)
    if args.keep_prob is not None:
        KEEP_PROB = args.keep_prob
        print("New KEEP_PROB = %.2f" % KEEP_PROB)
def run_training():
"""Train for a number of steps."""
# Tell TensorFlow that the model will be built into the default Graph.
train_data = data[:int(TRAINING_PROP*len(data))]
test_data = data[int(TRAINING_PROP*len(data)):]
    train_data = train_data.reset_index(drop=True)
    test_data = test_data.reset_index(drop=True)
validation_accuracy = np.zeros((1,3))
with tf.Graph().as_default():
# Generate placeholders for the images and labels.
images_placeholder, labels_placeholder = placeholder_inputs(BATCH_SIZE)
keep_prob = tf.placeholder(tf.float32)
train_phase = tf.placeholder(tf.bool)
# Build a Graph that computes predictions from the inference model.
logits = inference(images_placeholder, keep_prob, FCHU1, train_phase)
# Add to the Graph the Ops for loss calculation.
loss = calc_loss(logits, labels_placeholder)
# Add to the Graph the Ops that calculate and apply gradients.
train_op = training(loss, LEARNING_RATE)
# Add the Op to compare the logits to the labels during evaluation.
eval_correct = evaluation(logits, labels_placeholder)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
# Create a saver for writing training checkpoints.
saver = tf.train.Saver()
# Create a session for running Ops on the Graph.
sess = tf.Session()
# Instantiate a SummaryWriter to output summaries and the Graph.
logs_path = os.path.abspath('../../Data/logs/')
if not os.path.exists(logs_path):
os.mkdir(logs_path)
summary_path = os.path.abspath(logs_path+'/model'+MODEL_NAME+'/')
if os.path.exists(summary_path):
shutil.rmtree(summary_path)
os.mkdir(summary_path)
summary_writer = tf.train.SummaryWriter(summary_path, sess.graph)
# Run the Op to initialize the variables.
init = tf.initialize_all_variables()
sess.run(init)
current_img_ind = 0
# And then after everything is built, start the training loop.
for step in xrange(MAX_STEPS):
start_time = time.time()
# Fill a feed dictionary with the actual set of images and labels
# for this particular training step.
feed_dict, current_img_ind, train_data = fill_feed_dict(train_data,
images_placeholder,
labels_placeholder,
current_img_ind=current_img_ind,
batch_size=BATCH_SIZE,
keep_prob=keep_prob,
train_phase_pl=train_phase,
is_training=True)
# Run one step of the model. The return values are the activations
# from the `train_op` (which is discarded) and the `loss` Op.
_, summary_str, loss_value = sess.run([train_op, summary_op, loss],
feed_dict=feed_dict)
# Update the events file.
summary_writer.add_summary(summary_str, step)
summary_writer.flush()
duration = time.time() - start_time
# Write the summaries and print an overview fairly often.
if step % LOSS_STEP == 0:
# Print status to stdout.
print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
# Save a checkpoint and evaluate the model periodically.
if (step + 1) % (CKPT_STEP) == 0 or (step + 1) == MAX_STEPS:
model_path = os.path.abspath('../../Data/models/')
if not os.path.exists(model_path):
os.mkdir(model_path)
model_save_path = os.path.abspath(model_path+'/model'+MODEL_NAME+'/')
if not os.path.exists(model_save_path):
os.mkdir(model_save_path)
saver.save(sess, model_save_path+'/model.ckpt', global_step=step)
# Evaluate against the training set.
print('Training Data Eval:')
train_acc = do_eval(sess, eval_correct, images_placeholder, labels_placeholder, train_data, BATCH_SIZE, keep_prob,
train_phase, False)
# Evaluate against the validation set.
print('Validation Data Eval:')
valid_acc = do_eval(sess, eval_correct, images_placeholder, labels_placeholder, test_data, BATCH_SIZE, keep_prob,
train_phase, False)
validation_accuracy = np.append(validation_accuracy, np.array([[step, train_acc, valid_acc]]), axis=0)
np.save(os.path.abspath('../../Data/models/model'+MODEL_NAME+'/')+ '/validation_accuracy', validation_accuracy)
def main():
finish_parsing()
global data, mean_img, TOTAL_PATCHES, NUM_IMAGES, PATCHES_PER_IMAGE, PATCH_DIM
    print('Loading data')
    data = pd.read_pickle('../../Data/mean_normalised_df_no_class_bias.pkl')
    mean_img = pd.read_pickle('../../Data/mean_img_no_class_bias.pkl')
    print('Loading complete')
train, mask_train, gt_train = get_path('../../Data/DRIVE/training')
test, mask_test, mask_gt = get_path('../../Data/DRIVE/test')
# Changing some Hyper Params
TOTAL_PATCHES = len(data)
NUM_IMAGES = len(train)
PATCHES_PER_IMAGE = TOTAL_PATCHES/NUM_IMAGES
PATCH_DIM = int(np.sqrt((len(data.columns)-1)/3))
run_training()
if __name__ == "__main__":
    print('BN after h_fc1(all layers), ditching the dropout layer altogether')
    # Hard-coded flag values below override anything passed on the command line.
    sys.argv = ['v2_graph.py', '--batch', '512', '--fchu1', '256', '--learning_rate', '2e-4',
'--training_prop', '0.9', '--max_steps', '10000',
'--checkpoint_step', '100', '--loss_step', '40', '--keep_prob', '0.6',
'--model_name', '22']
main()