-
Notifications
You must be signed in to change notification settings - Fork 8
/
encoder_vgg16.py
130 lines (105 loc) · 5.45 KB
/
encoder_vgg16.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
from keras.models import Sequential
from keras.layers import BatchNormalization, Conv2D, UpSampling2D, MaxPooling2D, Dropout
from keras.optimizers import Adam, SGD
from keras import regularizers
from keras.callbacks import LearningRateScheduler
import numpy as np
import itertools
import pickle
"""
def step_decay(epoch):
if epoch < 25:
return 0.01
if epoch < 50:
return 0.005
else:
return 0.001
"""
def get_vgg16():
    """Build and compile a VGG16-style convolutional encoder-decoder.

    The encoder is four VGG-like conv blocks with max-pooling (blocks 1-4)
    plus a bottleneck conv block (block 5); the decoder mirrors it with
    UpSampling2D blocks (6-9) and a final 1x1 conv producing a single-channel
    map. Input shape is fixed at (80, 120, 3); output shape is (80, 120, 1).

    Note the asymmetric (2, 3) pool/upsample in block4/block6: after two
    (2, 2) pools the width is 30, which is not divisible by 2 again, so the
    width is downsampled by 3 instead (20x10 bottleneck) and restored by the
    matching (2, 3) upsampling.

    Returns:
        A compiled ``keras.models.Sequential`` model (MAE loss, SGD optimizer
        with Nesterov momentum, MSE metric).
    """
    model = Sequential()
    # Encoder
    # Block 1 — BatchNormalization is the first layer and carries the
    # input_shape; a duplicate input_shape on block1_conv1 would be ignored
    # by Sequential, so it is omitted.
    model.add(BatchNormalization(axis=3, input_shape=(80, 120, 3)))
    #model.add(Dropout(.2))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu', bias_regularizer=regularizers.l1(0.01), name='block1_conv1'))
    model.add(BatchNormalization(axis=3))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu', bias_regularizer=regularizers.l1(0.01), name='block1_conv2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))
    # Block 2
    model.add(BatchNormalization(axis=3))
    model.add(Conv2D(128, (3, 3), padding='same', activation='relu', bias_regularizer=regularizers.l1(0.01), name='block2_conv1'))
    model.add(BatchNormalization(axis=3))
    model.add(Conv2D(128, (3, 3), padding='same', activation='relu', bias_regularizer=regularizers.l1(0.01), name='block2_conv2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))
    # Block 3
    model.add(BatchNormalization(axis=3))
    model.add(Conv2D(256, (3, 3), padding='same', activation='relu', bias_regularizer=regularizers.l1(0.01), name='block3_conv1'))
    model.add(BatchNormalization(axis=3))
    model.add(Conv2D(256, (3, 3), padding='same', activation='relu', bias_regularizer=regularizers.l1(0.01), name='block3_conv2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool'))
    # Block 4 — (2, 3) pooling: width 30 is not divisible by 2, so pool by 3.
    model.add(BatchNormalization(axis=3))
    model.add(Conv2D(512, (3, 3), padding='same', activation='relu', bias_regularizer=regularizers.l1(0.01), name='block4_conv1'))
    model.add(BatchNormalization(axis=3))
    model.add(Conv2D(512, (3, 3), padding='same', activation='relu', bias_regularizer=regularizers.l1(0.01), name='block4_conv2'))
    model.add(MaxPooling2D((2, 3), strides=(2, 3), name='block4_pool'))
    # Block 5 (bottleneck, no pooling)
    model.add(BatchNormalization(axis=3))
    model.add(Conv2D(512, (3, 3), padding='same', activation='relu', bias_regularizer=regularizers.l1(0.01), name='block5_conv1'))
    model.add(BatchNormalization(axis=3))
    model.add(Conv2D(512, (3, 3), padding='same', activation='relu', bias_regularizer=regularizers.l1(0.01), name='block5_conv2'))
    # Decoder
    # Block 6 — (2, 3) upsampling mirrors the block4 pool.
    model.add(UpSampling2D((2, 3), name='block6_upsampl'))
    model.add(BatchNormalization(axis=3))
    model.add(Conv2D(512, (3, 3), padding='same', activation='relu', bias_regularizer=regularizers.l1(0.01), name='block6_conv1'))
    model.add(BatchNormalization(axis=3))
    model.add(Conv2D(512, (3, 3), padding='same', activation='relu', bias_regularizer=regularizers.l1(0.01), name='block6_conv2'))
    # Block 7
    model.add(UpSampling2D((2, 2), name='block7_upsampl'))
    model.add(BatchNormalization(axis=3))
    model.add(Conv2D(256, (3, 3), padding='same', activation='relu', bias_regularizer=regularizers.l1(0.01), name='block7_conv1'))
    model.add(BatchNormalization(axis=3))
    model.add(Conv2D(256, (3, 3), padding='same', activation='relu', bias_regularizer=regularizers.l1(0.01), name='block7_conv2'))
    # Block 8
    model.add(UpSampling2D((2, 2), name='block8_upsampl'))
    model.add(BatchNormalization(axis=3))
    model.add(Conv2D(128, (3, 3), padding='same', activation='relu', bias_regularizer=regularizers.l1(0.01), name='block8_conv1'))
    model.add(BatchNormalization(axis=3))
    model.add(Conv2D(128, (3, 3), padding='same', activation='relu', bias_regularizer=regularizers.l1(0.01), name='block8_conv2'))
    # Block 9
    model.add(UpSampling2D((2, 2), name='block9_upsampl'))
    model.add(BatchNormalization(axis=3))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu', bias_regularizer=regularizers.l1(0.01), name='block9_conv1'))
    model.add(BatchNormalization(axis=3))
    #model.add(Dropout(.2))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu', bias_regularizer=regularizers.l1(0.01), name='block9_conv2'))
    # Output — 1x1 conv collapses to a single channel; relu keeps the
    # predicted field non-negative.
    model.add(BatchNormalization(axis=3))
    model.add(Conv2D(1, (1, 1), padding='same', activation='relu', bias_regularizer=regularizers.l1(0.01), name='block10_conv1'))
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='mae', optimizer=sgd, metrics=['mse'])
    #model.compile(loss='mae', optimizer=Adam(lr=0.001), metrics=['mse'])
    # Fix: the original used the Python-2 print statement
    # ("print model.summary()"), which is a syntax error under Python 3 and,
    # even on Python 2, printed a stray "None" (summary() returns None and
    # prints internally). Calling summary() directly is correct on both.
    model.summary()
    return model
#x = np.load("/datasets/1980-2016/z_1980_2016.npy")
# --- Data loading ---
# x: predictor volume loaded from a hard-coded path; indexed below as a 4-D
#    array (sample, lat, lon, level). Assumes 10 geopotential levels on the
#    last axis (see level list comment below) — TODO confirm against dataset.
x = np.load("/datasets/10zlevels.npy")
# y: target total precipitation, scaled by 1000 (presumably m -> mm; verify
# against dataset units) and given a trailing channel axis -> (sample, lat, lon, 1).
y = 1000*np.expand_dims(np.load("/datasets/1980-2016/full_tp_1980_2016.npy"), axis=3)
# --- Shuffle samples reproducibly (fixed seed) before the train/test split,
# so x and y stay aligned via the shared index permutation. ---
idxs = np.arange(x.shape[0])
np.random.seed(0)
np.random.shuffle(idxs)
x = x[idxs, :, :, :]
y = y[idxs, :]
# First 40000 shuffled samples train, remainder test (hard-coded split point).
y_train = y[:40000, :]
y_test = y[40000:, :]
# Levels [1000, 900, 800, 700, 600, 500, 400, 300, 200, 100]
# Select three pressure levels as the 3 input channels:
# i=0 -> 1000 hPa, j=2 -> 800 hPa, k=6 -> 400 hPa (per the level list above).
i = 0
j = 2
k = 6
x_train = x[:40000, :, :, [i,j,k]]
x_test = x[40000:, :, :, [i,j,k]]
# Build, train, and persist: the held-out slice doubles as validation data.
model = get_vgg16()
history = model.fit(x_train, y_train, epochs=50, verbose=1, validation_data=(x_test, y_test))
# Save the per-epoch metrics dict keyed by the chosen level indices.
with open('trainHistoryDict_vgg16_{}-{}-{}'.format(i, j, k), 'wb') as file_pi:
    pickle.dump(history.history, file_pi)
# NOTE(review): saved filename hard-codes 0_2_6 rather than using i, j, k —
# it will silently mismatch the history file if the indices are changed.
model.save('/datasets/vgg16_0_2_6.h5')