# tensorboard.py

import os
from typing import Dict, Optional, Union

import torch
from tensorboardX import SummaryWriter

from allennlp.training.callbacks.callback import TrainerCallback
from allennlp.training.callbacks.log_writer import LogWriterCallback


@TrainerCallback.register("tensorboard")
class TensorBoardCallback(LogWriterCallback):
    """
    A callback that writes training statistics/metrics to TensorBoard.
    """

    def __init__(
        self,
        serialization_dir: str,
        summary_interval: int = 100,
        distribution_interval: Optional[int] = None,
        batch_size_interval: Optional[int] = None,
        should_log_parameter_statistics: bool = False,
        should_log_learning_rate: bool = False,
    ) -> None:
        super().__init__(
            serialization_dir,
            summary_interval=summary_interval,
            distribution_interval=distribution_interval,
            batch_size_interval=batch_size_interval,
            should_log_parameter_statistics=should_log_parameter_statistics,
            should_log_learning_rate=should_log_learning_rate,
        )
        # Create log directories prior to creating SummaryWriter objects
        # in order to avoid race conditions during distributed training.
        train_ser_dir = os.path.join(self.serialization_dir, "log", "train")
        os.makedirs(train_ser_dir, exist_ok=True)
        self._train_log = SummaryWriter(train_ser_dir)
        val_ser_dir = os.path.join(self.serialization_dir, "log", "validation")
        os.makedirs(val_ser_dir, exist_ok=True)
        self._validation_log = SummaryWriter(val_ser_dir)
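        # The layout above places event files under `<serialization_dir>/log/train`
        # and `<serialization_dir>/log/validation`, so the train and validation
        # runs can be viewed side by side with, for example:
        #
        #   tensorboard --logdir <serialization_dir>/log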

    def log_scalars(
        self,
        scalars: Dict[str, Union[int, float]],
        log_prefix: str = "",
        epoch: Optional[int] = None,
    ) -> None:
        assert self.trainer is not None
        # Use the epoch as the step if one is given; otherwise fall back to the
        # trainer's global count of completed batches.
        timestep = epoch if epoch is not None else self.trainer._total_batches_completed
        log = self._train_log if not log_prefix.startswith("validation") else self._validation_log
        for key, value in scalars.items():
            name = f"{log_prefix}/{key}" if log_prefix else key
            log.add_scalar(name, value, timestep + 1)

    def log_tensors(
        self, tensors: Dict[str, torch.Tensor], log_prefix: str = "", epoch: Optional[int] = None
    ) -> None:
        assert self.trainer is not None
        timestep = epoch if epoch is not None else self.trainer._total_batches_completed
        log = self._train_log if not log_prefix.startswith("validation") else self._validation_log
        for key, values in tensors.items():
            name = f"{log_prefix}/{key}" if log_prefix else key
            # Detach from the graph and move to CPU before flattening into a
            # NumPy array for the histogram writer.
            values_to_write = values.detach().cpu().numpy().flatten()
            log.add_histogram(name, values_to_write, timestep + 1)

    def close(self) -> None:
        """
        Calls the `close` method of each `SummaryWriter`, which ensures that any
        pending scalars are flushed to disk and the TensorBoard event files are
        closed properly.
        """
        super().close()
        if self._train_log is not None:
            self._train_log.close()
        if self._validation_log is not None:
            self._validation_log.close()
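

# Usage sketch, not part of the original file: because the class is registered
# as "tensorboard", a trainer config can typically enable it by name, e.g.
# `"callbacks": [{"type": "tensorboard", "summary_interval": 100}]` under the
# `"trainer"` key. The direct construction below is a minimal illustrative
# example; the serialization directory is a hypothetical scratch path.
if __name__ == "__main__":
    callback = TensorBoardCallback(
        serialization_dir="/tmp/tensorboard_callback_demo",  # hypothetical path
        summary_interval=100,
        should_log_learning_rate=True,
    )
    # Flush and close the underlying writers.
    callback.close()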