# -------------------------------------------------------------------------------------------------------------------------------------
# Following code curated for Bio-Diffusion (https://github.com/BioinfoMachineLearning/bio-diffusion):
# -------------------------------------------------------------------------------------------------------------------------------------
import os
import os.path
import sys
import warnings
from pathlib import Path
from typing import Any, Dict, List, Optional

import pytorch_lightning as pl
import torch
from pytorch_lightning import Callback
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_info, rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.types import STEP_OUTPUT
from torch.autograd import Variable
from torchtyping import patch_typeguard
from torchviz import make_dot
from typeguard import typechecked

try:
    import amp_C

    apex_available = True
except Exception:
    apex_available = False

from src.utils.pylogger import get_pylogger
from src.utils.rich_utils import enforce_tags, print_config_tree
from src.utils.utils import (
    close_loggers,
    extras,
    get_metric_value,
    instantiate_callbacks,
    instantiate_loggers,
    log_hyperparameters,
    save_file,
    task_wrapper,
)

patch_typeguard()  # use before @typechecked

log = get_pylogger(__name__)


@typechecked
def make_and_save_network_graphviz_plot(
    output_var: Variable,
    params: Dict[str, Variable],
    dot_output_filepath: Path = Path("comp_graph.dot"),
    dot_output_format: str = "pdf",
    exit_after_save: bool = True,
    verbose: bool = True,
):
    """Render a Graphviz plot of the computational graph rooted at `output_var`.

    Args:
        output_var: Output tensor from which to trace the computational graph.
        params: Mapping from parameter names to parameter tensors, used to label graph nodes.
        dot_output_filepath: Path at which to save the rendered `.dot` file.
        dot_output_format: Output format in which to render the graph (e.g., `pdf`).
        exit_after_save: Whether to exit the process once the plot has been saved.
        verbose: Whether to log progress messages.
    """
    if verbose:
        log.info("Making Graphviz representation of computational graph...")
    g = make_dot(output_var, params=params)
    g.render(str(dot_output_filepath), format=dot_output_format, view=False)
    if verbose:
        log.info("Finished making Graphviz representation of computational graph")
    if exit_after_save:
        sys.exit(0)
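

# Usage sketch (illustrative only; the toy `torch.nn.Linear` model below is an
# assumption, not part of this module). Passing `exit_after_save=False` keeps the
# calling process alive after the plot is written.
def _example_graphviz_usage() -> None:
    """Render the computational graph of a small linear model to `toy_comp_graph.dot`."""
    model = torch.nn.Linear(in_features=4, out_features=2)
    output = model(torch.randn(1, 4))  # forward pass produces a tensor with a grad_fn to trace
    make_and_save_network_graphviz_plot(
        output_var=output,
        params=dict(model.named_parameters()),
        dot_output_filepath=Path("toy_comp_graph.dot"),
        exit_after_save=False,
    )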


class EMA(Callback):
    """
    Implements Exponential Moving Averaging (EMA).

    When training a model, this callback maintains moving averages of the trained parameters.
    When evaluating, the moving-average copy of the trained parameters is used instead.
    When saving, an additional set of parameters is saved with the prefix `ema`.

    Args:
        decay: The exponential decay used when calculating the moving average. Must be between 0 and 1.
        apply_ema_every_n_steps: Apply EMA every n global steps.
        start_step: Start applying EMA from ``start_step`` global step onwards.
        save_ema_weights_in_callback_state: Enable saving EMA weights in callback state.
        evaluate_ema_weights_instead: Validate the EMA weights instead of the original weights.
            Note that this means the validation metrics logged when saving the model are calculated with the EMA weights.

    Adapted from: https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/common/callbacks/ema.py
    """

    def __init__(
        self,
        decay: float,
        apply_ema_every_n_steps: int = 1,
        start_step: int = 0,
        save_ema_weights_in_callback_state: bool = False,
        evaluate_ema_weights_instead: bool = False,
    ):
        if not apex_available:
            rank_zero_warn(
                "EMA has better performance when Apex is installed: https://github.com/NVIDIA/apex#installation."
            )
        if not (0 <= decay <= 1):
            raise MisconfigurationException("EMA decay value must be between 0 and 1")
        self._ema_model_weights: Optional[List[torch.Tensor]] = None
        self._overflow_buf: Optional[torch.Tensor] = None
        self._cur_step: Optional[int] = None
        self._weights_buffer: Optional[List[torch.Tensor]] = None
        self.apply_ema_every_n_steps = apply_ema_every_n_steps
        self.start_step = start_step
        self.save_ema_weights_in_callback_state = save_ema_weights_in_callback_state
        self.evaluate_ema_weights_instead = evaluate_ema_weights_instead
        self.decay = decay

    def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        log.info("Creating EMA weights copy.")
        if self._ema_model_weights is None:
            self._ema_model_weights = [p.detach().clone() for p in pl_module.state_dict().values()]
        # ensure that all the weights are on the correct device
        self._ema_model_weights = [p.to(pl_module.device) for p in self._ema_model_weights]
        self._overflow_buf = torch.IntTensor([0]).to(pl_module.device)

    def ema(self, pl_module: "pl.LightningModule") -> None:
        # use Apex's fused multi-tensor kernel on CUDA when available; otherwise fall back to the pure-PyTorch path
        if apex_available and pl_module.device.type == "cuda":
            return self.apply_multi_tensor_ema(pl_module)
        return self.apply_ema(pl_module)

    def apply_multi_tensor_ema(self, pl_module: "pl.LightningModule") -> None:
        model_weights = list(pl_module.state_dict().values())
        # fused in-place update across all tensors at once: ema = decay * ema + (1 - decay) * model
        amp_C.multi_tensor_axpby(
            65536,
            self._overflow_buf,
            [self._ema_model_weights, model_weights, self._ema_model_weights],
            self.decay,
            1 - self.decay,
            -1,
        )

    def apply_ema(self, pl_module: "pl.LightningModule") -> None:
        # in-place EMA update, ema <- ema - (1 - decay) * (ema - orig), i.e., ema <- decay * ema + (1 - decay) * orig
        # (see the `_example_ema_update_rule` sketch below this class for a numeric check)
        for orig_weight, ema_weight in zip(list(pl_module.state_dict().values()), self._ema_model_weights):
            if ema_weight.data.dtype != torch.long and orig_weight.data.dtype != torch.long:
                # ensure that non-trainable parameters (e.g., feature distributions) are not included in EMA weight averaging
                diff = ema_weight.data - orig_weight.data
                diff.mul_(1.0 - self.decay)
                ema_weight.sub_(diff)

    def should_apply_ema(self, step: int) -> bool:
        return step != self._cur_step and step >= self.start_step and step % self.apply_ema_every_n_steps == 0

    def on_train_batch_end(
        self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs: STEP_OUTPUT, batch: Any, batch_idx: int
    ) -> None:
        if self.should_apply_ema(trainer.global_step):
            self._cur_step = trainer.global_step
            self.ema(pl_module)

    def state_dict(self) -> Dict[str, Any]:
        if self.save_ema_weights_in_callback_state:
            return dict(cur_step=self._cur_step, ema_weights=self._ema_model_weights)
        return dict(cur_step=self._cur_step)

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        self._cur_step = state_dict["cur_step"]
        # when loading within apps such as NeMo, EMA weights will be loaded by the experiment manager separately
        if self._ema_model_weights is None:
            self._ema_model_weights = state_dict.get("ema_weights")

    def on_load_checkpoint(
        self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", checkpoint: Dict[str, Any]
    ) -> None:
        checkpoint_callback = trainer.checkpoint_callback
        if trainer.ckpt_path and checkpoint_callback is not None:
            ext = checkpoint_callback.FILE_EXTENSION
            if trainer.ckpt_path.endswith(f"-EMA{ext}"):
                log.info(
                    "Loading EMA-based weights. "
                    "The callback will treat the loaded EMA weights as the main weights"
                    " and create a new EMA copy when training."
                )
                return
            ema_path = trainer.ckpt_path.replace(ext, f"-EMA{ext}")
            if os.path.exists(ema_path):
                ema_state_dict = torch.load(ema_path, map_location=torch.device("cpu"))
                # materialize as a list so the weights can be re-serialized in `state_dict()` later
                self._ema_model_weights = list(ema_state_dict["state_dict"].values())
                del ema_state_dict
                log.info("EMA weights have been loaded successfully. Continuing training with saved EMA weights.")
            else:
                warnings.warn(
                    "We were unable to find the associated EMA weights when re-loading; "
                    "training will start with new EMA weights.",
                    UserWarning,
                )

    def replace_model_weights(self, pl_module: "pl.LightningModule") -> None:
        # stash the original weights on the CPU, then swap the EMA weights in
        self._weights_buffer = [p.detach().clone().to("cpu") for p in pl_module.state_dict().values()]
        new_state_dict = {k: v for k, v in zip(pl_module.state_dict().keys(), self._ema_model_weights)}
        pl_module.load_state_dict(new_state_dict)

    def restore_original_weights(self, pl_module: "pl.LightningModule") -> None:
        state_dict = pl_module.state_dict()
        new_state_dict = {k: v for k, v in zip(state_dict.keys(), self._weights_buffer)}
        pl_module.load_state_dict(new_state_dict)
        del self._weights_buffer

    @property
    def ema_initialized(self) -> bool:
        return self._ema_model_weights is not None

    def on_validation_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        if self.ema_initialized and self.evaluate_ema_weights_instead:
            self.replace_model_weights(pl_module)

    def on_validation_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        if self.ema_initialized and self.evaluate_ema_weights_instead:
            self.restore_original_weights(pl_module)

    def on_test_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        if self.ema_initialized and self.evaluate_ema_weights_instead:
            self.replace_model_weights(pl_module)

    def on_test_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        if self.ema_initialized and self.evaluate_ema_weights_instead:
            self.restore_original_weights(pl_module)
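

# Update-rule sketch (illustrative only; this helper is an assumption, not part of the
# original module): `EMA.apply_ema` computes `ema <- ema - (1 - decay) * (ema - orig)`,
# which is algebraically the standard EMA update `ema <- decay * ema + (1 - decay) * orig`.
def _example_ema_update_rule() -> torch.Tensor:
    """Run one in-place EMA step on a single tensor with decay=0.9 and return the result."""
    decay = 0.9
    ema = torch.tensor([1.0])
    orig = torch.tensor([0.0])
    diff = ema - orig  # ema - orig == 1.0
    diff.mul_(1.0 - decay)  # scaled difference == 0.1
    ema.sub_(diff)  # ema is now 0.9 == 0.9 * 1.0 + 0.1 * 0.0
    return ema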


class EMAModelCheckpoint(ModelCheckpoint):
    """
    Light wrapper around Lightning's `ModelCheckpoint` that, upon request, also saves an EMA copy of the model.

    Adapted from: https://github.com/NVIDIA/NeMo/blob/be0804f61e82dd0f63da7f9fe8a4d8388e330b18/nemo/utils/exp_manager.py#L744
    """

    def __init__(self, **kwargs):
        # call the parent class constructor with the provided kwargs
        super().__init__(**kwargs)

    def _get_ema_callback(self, trainer: "pl.Trainer") -> Optional[EMA]:
        ema_callback = None
        for callback in trainer.callbacks:
            if isinstance(callback, EMA):
                ema_callback = callback
        return ema_callback

    def _save_checkpoint(self, trainer: "pl.Trainer", filepath: str) -> None:
        super()._save_checkpoint(trainer, filepath)
        ema_callback = self._get_ema_callback(trainer)
        if ema_callback is not None:
            # save an EMA copy of the model as well, under a `-EMA` suffixed filename
            ema_callback.replace_model_weights(trainer.lightning_module)
            filepath = self._ema_format_filepath(filepath)
            if self.verbose:
                rank_zero_info(f"Saving EMA weights to separate checkpoint {filepath}")
            super()._save_checkpoint(trainer, filepath)
            ema_callback.restore_original_weights(trainer.lightning_module)

    def _ema_format_filepath(self, filepath: str) -> str:
        return filepath.replace(self.FILE_EXTENSION, f"-EMA{self.FILE_EXTENSION}")
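

# Usage sketch (illustrative only; the hyperparameters and monitored metric below are
# assumptions, not part of this module): wiring `EMA` together with `EMAModelCheckpoint`
# so that every saved checkpoint gets a matching `-EMA` twin containing the averaged weights.
def _example_ema_trainer(model: "pl.LightningModule") -> "pl.Trainer":
    """Build a Trainer that maintains EMA weights, validates with them, and checkpoints both copies."""
    ema_callback = EMA(
        decay=0.999,
        apply_ema_every_n_steps=1,
        evaluate_ema_weights_instead=True,  # validation metrics reflect the EMA weights
    )
    checkpoint_callback = EMAModelCheckpoint(monitor="val_loss", save_top_k=1)
    trainer = pl.Trainer(max_epochs=10, callbacks=[ema_callback, checkpoint_callback])
    trainer.fit(model)
    return trainer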