forked from Yacalis/celeba-classification
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Callbacks.py
85 lines (74 loc) · 3.05 KB
/
Callbacks.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 20 16:31:00 2018
@author: Yacalis
"""
from keras.callbacks import TensorBoard, EarlyStopping, ReduceLROnPlateau
from keras.callbacks import ModelCheckpoint, History
class Callbacks:
    """Assemble the list of Keras callbacks used during training.

    The list is exposed as ``self.callbacks`` and always contains
    TensorBoard logging, periodic model checkpointing, a metrics
    History, and early stopping. Learning-rate reduction on plateau
    is appended only when ``config.change_lr`` is truthy.
    """
    def __init__(self, config, log_dir):
        # config: experiment configuration object; the attributes read
        #   below are batch_size, es_min_delta, es_patience, lr_factor,
        #   lr_patience, lr_epsilon, lr_min_lr, period, change_lr.
        # log_dir: directory for TensorBoard event files and checkpoints.
        self.callbacks = self.main(config, log_dir)
    @staticmethod
    def main(config: object, log_dir: str) -> list:
        """Build and return the configured list of Keras callbacks."""
        # set up tensorboard visualization (images only; graph and
        # gradient histograms are disabled to keep event files small)
        tensorboard = TensorBoard(
            log_dir=log_dir,
            histogram_freq=0,
            batch_size=config.batch_size,
            write_graph=False,
            write_grads=False,
            write_images=True
        )
        # ======================================================================
        # if 'monitor' (validation accuracy) does not improve by at least
        # 'min_delta' within 'patience' number of epochs, stop training;
        # mode='max' because higher accuracy is better
        # ======================================================================
        earlystopping = EarlyStopping(
            monitor='val_acc',
            min_delta=config.es_min_delta,
            patience=config.es_patience,
            verbose=1,
            mode='max'
        )
        # ======================================================================
        # if 'monitor' (validation accuracy) does not improve by at least
        # 'epsilon' within 'patience' number of epochs, multiply the model's
        # learning rate by 'factor', down to a minimum value of 'min_lr'
        # NOTE(review): 'epsilon' was renamed 'min_delta' in later Keras
        # releases; kept as-is for compatibility with the pinned version.
        # ======================================================================
        reduce_lr_on_plateau = ReduceLROnPlateau(
            monitor='val_acc',
            factor=config.lr_factor,
            patience=config.lr_patience,
            verbose=1,
            mode='max',
            epsilon=config.lr_epsilon,
            cooldown=0,
            min_lr=config.lr_min_lr
        )
        # ======================================================================
        # save the state of the model and its weights every 'period' epochs
        #
        # 'monitor' is validation accuracy; with 'save_best_only' set to
        # False every checkpoint is written regardless -- with early
        # stopping this typically won't matter, as training will stop if
        # the model is not improving
        # ======================================================================
        chckpt_fp = log_dir + '/chckpt.ep_{epoch:02d}-loss_{val_loss:.2f}.hdf5'
        model_checkpt = ModelCheckpoint(
            chckpt_fp,
            monitor='val_acc',
            verbose=1,
            save_best_only=False,
            save_weights_only=False,
            mode='max',
            period=config.period
        )
        # save history of metrics values, loss (and lr if reduce_lr is present)
        history = History()
        callbacks = [tensorboard, model_checkpt, history, earlystopping]
        if config.change_lr:
            callbacks.append(reduce_lr_on_plateau)
        return callbacks