-
Notifications
You must be signed in to change notification settings - Fork 0
/
Keras.py
118 lines (96 loc) · 4.67 KB
/
Keras.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
# Note: Some of this code may be out of date due to being written in ~2016
# Reference snippets for common Keras workflows: environment reporting,
# GPU setup, callbacks, and image-augmentation training loops.
import sys
import time
import numpy as np
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
# NOTE(review): `keras.layers.normalization` was removed in later Keras
# releases (BatchNormalization moved to `keras.layers`) — confirm which
# Keras version this file targets before running.
from keras.layers.normalization import BatchNormalization
from keras import callbacks
from keras import backend as K
# Log run time, platform, and library versions so results can later be
# matched to the environment that produced them.
print(time.strftime('%Y/%m/%d %H:%M'))
print('OS:', sys.platform)
print('Python:', sys.version)
print('NumPy:', np.__version__)
print('Keras:', keras.__version__)
########################################################################################################################
### Back end and GPUs
# Printing backend and GPU information
# Used for older versions of keras
from tensorflow.python.client import device_lib
print('Backend: TensorFlow', tf.__version__)
local_device_protos = device_lib.list_local_devices()
print([x for x in local_device_protos if x.device_type == 'GPU'])
# Printing GPU information
# NOTE(review): `K.tensorflow_backend` is a TF1-era private API — absent in
# TF2's Keras backend; kept here only as a legacy reference.
K.tensorflow_backend._get_available_gpus()
# Avoiding memory issues with the GPU - older API (TF1 ConfigProto/Session)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
K.set_session(sess)
# Avoiding memory issues with the GPU - current API.
# Fix: indexing physical_devices[0] unconditionally raises IndexError on
# CPU-only machines, and configured only the first GPU. Guard the empty
# case and enable memory growth on every visible GPU instead.
physical_devices = tf.config.list_physical_devices('GPU')
if not physical_devices:
    print('No GPU found; memory growth not configured.')
for gpu in physical_devices:
    tf.config.experimental.set_memory_growth(gpu, enable=True)
########################################################################################################################
### Convenience Functions
# One hot encoding
# Converts an integer class vector `y` into a (len(y), num_classes) binary
# matrix. NOTE(review): `y` and `num_classes` are placeholders supplied by
# whoever adapts this snippet; the return value is discarded as written.
keras.utils.to_categorical(y, num_classes)
########################################################################################################################
### Callbacks
# Halt training once val_loss has failed to improve by at least min_delta
# for `patience` consecutive epochs.
_earlystop_options = dict(
    monitor='val_loss',
    min_delta=0.0001,  # smallest val_loss change that counts as an improvement
    patience=5,        # epochs without improvement before stopping
    verbose=1,
    mode='auto',       # infer improvement direction from the monitored metric
)
earlystop = callbacks.EarlyStopping(**_earlystop_options)
# Tracking the training time for each epoch
class TimeHistory(callbacks.Callback):
    '''
    Tracks wall-clock training time on individual epochs for a Keras model.

    After training, ``self.times`` holds one duration in seconds per
    completed epoch, in order.
    '''
    # Fixes: mutable default argument `logs={}` (shared across calls) replaced
    # with the Keras-conventional `logs=None`; epoch hooks receive the epoch
    # index, so the parameter is named `epoch`, not `batch`.
    def on_train_begin(self, logs=None):
        # Reset so one callback instance can be reused across fit() calls.
        self.times = []
    def on_epoch_begin(self, epoch, logs=None):
        self.epoch_time_start = time.time()
    def on_epoch_end(self, epoch, logs=None):
        self.times.append(time.time() - self.epoch_time_start)
time_callback = TimeHistory()  # records per-epoch training duration
# Model checkpoints - keep only the weights with the best (lowest) val_loss.
model_filepath = 'model.val_loss{val_loss:.5f}_epoch{epoch:04d}-.h5'
checkpoint = callbacks.ModelCheckpoint(
    filepath=model_filepath,
    monitor='val_loss',
    save_best_only=True,
)
# Shrink the learning rate whenever training loss plateaus (stops decreasing).
learning_rate_redux = callbacks.ReduceLROnPlateau(
    monitor='loss',
    factor=0.3,      # new_lr = old_lr * 0.3
    patience=3,      # epochs on a plateau before reducing
    min_lr=0.00001,  # floor for the learning rate
    verbose=1,
)
# Fitting the model; the last 10% of the (unshuffled) data is the validation split.
fit_callbacks = [earlystop, checkpoint, learning_rate_redux, time_callback]
model_info = model.fit(
    X_train,
    y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    validation_split=0.1,
    callbacks=fit_callbacks,
)
########################################################################################################################
### Preprocessing
# Data augmentation
from keras.preprocessing.image import ImageDataGenerator
# Augmentation pipeline: random rotations, shifts, shears, zooms, and flips.
_augmentation_options = dict(
    rotation_range=360,
    width_shift_range=0.2,
    height_shift_range=0.2,
    rescale=None,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    vertical_flip=True,
    fill_mode='nearest',
)
datagenerator = ImageDataGenerator(**_augmentation_options)
# Compute any data-dependent statistics the generator needs from X_train.
datagenerator.fit(X_train)
# Fit the model on the batches generated by datagenerator.flow().
training_batches = datagenerator.flow(
    X_train,
    y_train,
    batch_size=batch_size,
    shuffle=True,
)
model_info = model.fit_generator(
    generator=training_batches,
    steps_per_epoch=10 * round(X_train.shape[0] / batch_size),
    epochs=epochs,
    validation_data=(X_val, y_val),
    verbose=1,
)