-
Notifications
You must be signed in to change notification settings - Fork 15
/
myAutomap_cpu.py
391 lines (324 loc) · 13.9 KB
/
myAutomap_cpu.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
import math
import time
import matplotlib.pyplot as plt
from generate_input import load_images_from_folder, load_STONE_data
## Load training data:
## (Alternative loader, kept for reference: reads images directly from a
##  folder via load_images_from_folder instead of the MATLAB-preprocessed
##  STONE dataset used below.)
#tic1 = time.time()
## Folder with images
#dir_train = "/home/chongduan/Documents/6_DeepLearning_CMR-FT_Strain/Deep-MRI-Reconstruction-master/load_raw_T1_Map_data/sense_recon"
#n_cases = (0,1) # load image data from 0 to 1
#X_train, Y_train = load_images_from_folder( # Load images for training
#    dir_train,
#    n_cases,
#    normalize=False,
#    imrotate=False)
#toc1 = time.time()
#print('Time to load data = ', (toc1 - tic1))
#print('X_train.shape at input = ', X_train.shape)
#print('Y_train.shape at input = ', Y_train.shape)
# Load training data, cropped and resized from MATLAB
tic1 = time.time()  # wall-clock timer so load time can be reported below
# Folder with images
dir_train = "/home/mnezafat/Documents/11_AUTOMAP/Dataset"
n_cases = (0,70)  # load cases 0 through 69 (70 cases total)
X_train, Y_train = load_STONE_data(  # Load images for training
    dir_train,
    n_cases,
    normalize=False,
    imrotate=False)
toc1 = time.time()
print('Time to load data = ', (toc1 - tic1))
print('X_train.shape at input = ', X_train.shape)
print('Y_train.shape at input = ', Y_train.shape)
## Reduce precision point
## (Disabled: casting to float32 here would halve memory use; the
##  placeholders are float32 anyway, so TF casts at feed time.)
#X_train = X_train.astype(np.float32)
#Y_train = Y_train.astype(np.float32)
def create_placeholders(n_H0, n_W0):
    """Build the tf placeholders fed at session run time.

    :param n_H0: image height
    :param n_W0: image width
    :return: (x, y) placeholders -- x is the 2-channel (real/imaginary)
        frequency-space input, y is the target image
    """
    input_shape = [None, n_H0, n_W0, 2]   # batch dim left open
    label_shape = [None, n_H0, n_W0]
    x = tf.placeholder(tf.float32, shape=input_shape, name='x')
    y = tf.placeholder(tf.float32, shape=label_shape, name='y')
    return x, y
def initialize_parameters():
    """Create the convolutional filter variables.

    :return: dict with keys "W1" (5x5, 1 -> 64 channels), "W2"
        (5x5, 64 -> 64 channels) and "W3" (7x7, 64 -> 1 channel,
        i.e. a single output filter used by the final conv layer)
    """
    # All filters share the same seeded Xavier initializer for
    # reproducible starting weights.
    xavier = tf.contrib.layers.xavier_initializer
    W1 = tf.get_variable("W1", [5, 5, 1, 64], initializer=xavier(seed=0))
    W2 = tf.get_variable("W2", [5, 5, 64, 64], initializer=xavier(seed=0))
    W3 = tf.get_variable("W3", [7, 7, 64, 1], initializer=xavier(seed=0))
    return {"W1": W1, "W2": W2, "W3": W3}
def forward_propagation(x, parameters):
    """ Defines all layers for forward propagation:
    Fully connected (FC1) -> tanh activation: size (n_im, n_H0 * n_W0)
    -> Fully connected (FC2) -> tanh activation: size (n_im, n_H0 * n_W0)
    -> Convolutional -> ReLU activation: size (n_im, n_H0, n_W0, 64)
    -> Convolutional -> ReLU activation: size (n_im, n_H0, n_W0, 64)
    -> Convolutional (single 7x7 filter) -> ReLU, squeezed to (n_im, n_H0, n_W0)
    :param x: Input - images in frequency space, size (n_im, n_H0, n_W0, 2)
    :param parameters: dict of conv filters W1, W2 (W3 is unused here; the
        final layer builds its own kernel via tf.layers.conv2d)
    :return: output of the last layer of the neural network
    """
    x_temp = tf.contrib.layers.flatten(x)  # size (n_im, n_H0 * n_W0 * 2)
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int() is the drop-in replacement.
    n_out = int(x.shape[1] * x.shape[2])  # size (n_im, n_H0 * n_W0)
    # with tf.device('/gpu:0'):
    with tf.device('/cpu:0'):
        # FC: input size (n_im, n_H0 * n_W0 * 2), output size (n_im, n_H0 * n_W0)
        FC1 = tf.contrib.layers.fully_connected(
            x_temp,
            n_out,
            activation_fn=tf.tanh,
            normalizer_fn=None,
            normalizer_params=None,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=None,
            biases_initializer=None,
            biases_regularizer=None,
            reuse=tf.AUTO_REUSE,
            variables_collections=None,
            outputs_collections=None,
            trainable=True,
            scope='fc1')
    with tf.device('/cpu:0'):
        # FC: input size (n_im, n_H0 * n_W0), output size (n_im, n_H0 * n_W0)
        FC2 = tf.contrib.layers.fully_connected(
            FC1,
            n_out,
            activation_fn=tf.tanh,
            normalizer_fn=None,
            normalizer_params=None,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=None,
            biases_initializer=None,
            biases_regularizer=None,
            reuse=tf.AUTO_REUSE,
            variables_collections=None,
            outputs_collections=None,
            trainable=True,
            scope='fc2')
    # Reshape output from FC layers into array of size (n_im, n_H0, n_W0, 1):
    FC_M = tf.reshape(FC2, [tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2], 1])
    # Retrieve the parameters from the dictionary "parameters":
    W1 = parameters['W1']
    W2 = parameters['W2']
    # CONV2D: filters W1, stride of 1, padding 'SAME'
    # Input size (n_im, n_H0, n_W0, 1), output size (n_im, n_H0, n_W0, 64)
    Z1 = tf.nn.conv2d(FC_M, W1, strides=[1, 1, 1, 1], padding='SAME')
    # RELU
    CONV1 = tf.nn.relu(Z1)
    # CONV2D: filters W2, stride 1, padding 'SAME'
    # Input size (n_im, n_H0, n_W0, 64), output size (n_im, n_H0, n_W0, 64)
    Z2 = tf.nn.conv2d(CONV1, W2, strides=[1, 1, 1, 1], padding='SAME')
    # RELU
    CONV2 = tf.nn.relu(Z2)
    # Final conv: one 7x7 filter mapping 64 channels down to 1.
    # (An L1 activity regularizer, as described in the AUTOMAP paper, was
    # tried here and is left disabled below.)
    CONV3 = tf.layers.conv2d(
        CONV2,
        filters=1,
        kernel_size=7,
        strides=(1, 1),
        padding='same',
        data_format='channels_last',
        dilation_rate=(1, 1),
        activation=tf.nn.relu,
        use_bias=True,
        kernel_initializer=None,
        bias_initializer=tf.zeros_initializer(),
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        # activity_regularizer=tf.contrib.layers.l1_regularizer(0.0001),
        kernel_constraint=None,
        bias_constraint=None,
        trainable=True,
        name='conv3',
        reuse=tf.AUTO_REUSE)
    # NOTE(review): tf.squeeze with no axis drops EVERY size-1 dimension,
    # so a batch of 1 image would lose its batch dim too. Harmless for the
    # training batch sizes used here; use axis=3 if single-image inference
    # is ever needed -- confirm before changing, as Y's shape must match.
    DECONV = tf.squeeze(CONV3)
    return DECONV
def compute_cost(DECONV, Y):
    """Per-pixel squared error between network output and label image.

    Note: no reduction is applied -- this returns the full element-wise
    loss tensor; callers average it themselves (see model()).

    :param DECONV: output of forward propagation
    :param Y: label image
    :return: tensor of squared differences, same shape as Y
    """
    diff = DECONV - Y
    return tf.square(diff)
def random_mini_batches(x, y, mini_batch_size=64, seed=0):
    """Shuffle the training examples and slice them into mini-batches.

    :param x: input frequency-space data, shape (m, ...)
    :param y: input image-space data, shape (m, ...)
    :param mini_batch_size: examples per batch; the last batch is smaller
        when m is not divisible by mini_batch_size
    :param seed: RNG seed so the shuffle is reproducible per epoch
    :return: list of (mini_batch_X, mini_batch_Y) tuples
    """
    n_examples = x.shape[0]
    np.random.seed(seed)
    # One shared permutation keeps each x slice paired with its y label.
    order = list(np.random.permutation(n_examples))
    x_shuffled = x[order, :]
    y_shuffled = y[order, :]
    # numpy slicing clips past the end, so a single stride loop yields both
    # the full batches and the short trailing batch (if any).
    return [(x_shuffled[start:start + mini_batch_size],
             y_shuffled[start:start + mini_batch_size])
            for start in range(0, n_examples, mini_batch_size)]
def model(X_train, Y_train, learning_rate=0.0001,
          num_epochs=100, minibatch_size=5, print_cost=True):
    """ Runs the forward and backward propagation
    :param X_train: input training frequency-space data
    :param Y_train: input training image-space data
    :param learning_rate: learning rate of gradient descent
    :param num_epochs: number of epochs
    :param minibatch_size: size of mini-batch
    :param print_cost: if True - the cost will be printed every epoch, as well
    as how long it took to run the epoch
    :return: this function saves the model to a file. The model can then
    be used to reconstruct the image from frequency space
    """
    with tf.device('/cpu:0'):
        ops.reset_default_graph()  # to not overwrite tf variables
        seed = 3
        (m, n_H0, n_W0, _) = X_train.shape
        # Create Placeholders
        X, Y = create_placeholders(n_H0, n_W0)
        # Initialize parameters
        parameters = initialize_parameters()
        # Build the forward propagation in the tf graph
        DECONV = forward_propagation(X, parameters)
        # Add cost function to tf graph (per-pixel loss; averaged below)
        cost = compute_cost(DECONV, Y)
        # Backpropagation; global_step lets the Saver tag checkpoints
        # with the training step count.
        my_global_step = tf.Variable(0, dtype=tf.int32, trainable=False,
                                     name='global_step')
        optimizer = tf.train.RMSPropOptimizer(
            learning_rate,
            decay=0.9,
            momentum=0.0).minimize(cost, global_step=my_global_step)
        # Initialize all the variables globally
        init = tf.global_variables_initializer()
        # Add ops to save and restore all the variables
        saver = tf.train.Saver(save_relative_paths=True)
        # BUG FIX: the original built a ConfigProto with allow_growth=True
        # and then overwrote it with a fresh ConfigProto(log_device_placement
        # =True), silently discarding the GPU memory setting. Build the
        # config once with both options.
        config = tf.ConfigProto(log_device_placement=True)
        config.gpu_options.allow_growth = True
    # Start the session to compute the tf graph
    with tf.Session(config=config) as sess:
        # Initialization
        sess.run(init)
        # Training loop
        learning_curve = []
        for epoch in range(num_epochs):
            tic = time.time()
            minibatch_cost = 0.
            num_minibatches = int(m / minibatch_size)  # number of minibatches
            # New seed each epoch -> a different (but reproducible) shuffle
            seed += 1
            minibatches = random_mini_batches(X_train, Y_train,
                                              minibatch_size, seed)
            # Minibatch loop
            for minibatch in minibatches:
                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch
                # Run the session to execute the optimizer and the cost
                _, temp_cost = sess.run(
                    [optimizer, cost],
                    feed_dict={X: minibatch_X, Y: minibatch_Y})
                # cost is unreduced, so average over pixels here, then over
                # the number of minibatches to get an epoch-mean cost.
                cost_mean = np.mean(temp_cost) / num_minibatches
                minibatch_cost += cost_mean
            # Print the cost every epoch
            learning_curve.append(minibatch_cost)
            if print_cost:
                toc = time.time()
                print('EPOCH = ', epoch, 'COST = ', minibatch_cost,
                      'Elapsed time = ', (toc - tic))
            # Checkpoint every 10 epochs
            if (epoch + 1) % 10 == 0:
                save_path = saver.save(sess, './checkpoints/model.ckpt',
                                       global_step=my_global_step)
                print("Model saved in file: %s" % save_path)
        # Plot learning curve
        plt.plot(learning_curve)
        plt.title('Learning Curve')
        plt.xlabel('Epoch')
        plt.ylabel('Cost')
        plt.show()
        # (Redundant sess.close() removed: the `with` block closes the
        # session on exit.)
# Finally run the model!
# NOTE(review): minibatch_size must not exceed the number of loaded training
# examples, or random_mini_batches returns a single short batch and
# num_minibatches becomes 0 inside model() -- verify against the dataset size.
model(X_train, Y_train,
      # learning_rate=0.00002,
      learning_rate=0.0001,
      num_epochs=500,
      minibatch_size=55,  # should be < than the number of input examples
      print_cost=True)