"""
Weights and Biases
------------------
"""
import os
from argparse import Namespace
from typing import Optional, List, Dict, Union, Any

import torch.nn as nn

try:
    import wandb
    from wandb.wandb_run import Run
    _WANDB_AVAILABLE = True
except ImportError:  # pragma: no-cover
    wandb = None
    Run = None
    _WANDB_AVAILABLE = False

from pytorch_lightning.loggers.base import LightningLoggerBase
from pytorch_lightning.utilities import rank_zero_only


class WandbLogger(LightningLoggerBase):
    """
    Log using `Weights and Biases <https://www.wandb.com/>`_. Install it with pip:

    .. code-block:: bash

        pip install wandb

    Args:
        name: Display name for the run.
        save_dir: Path where data is saved.
        offline: Run offline (data can be streamed later to wandb servers).
        id: Sets the version, mainly used to resume a previous run.
        anonymous: Enables or explicitly disables anonymous logging.
        version: Sets the version, mainly used to resume a previous run.
        project: The name of the project to which this run will belong.
        tags: Tags associated with this run.
        log_model: Save checkpoints in wandb dir to upload on W&B servers.
        experiment: WandB experiment object.
        entity: The team posting this run (default: your username or your default team).
        group: A unique string shared by all runs in a given group.

    Example:
        >>> from pytorch_lightning.loggers import WandbLogger
        >>> from pytorch_lightning import Trainer
        >>> wandb_logger = WandbLogger()
        >>> trainer = Trainer(logger=wandb_logger)

    See Also:
        - `Tutorial <https://app.wandb.ai/cayush/pytorchlightning/reports/
          Use-Pytorch-Lightning-with-Weights-%26-Biases--Vmlldzo2NjQ1Mw>`__
          on how to use W&B with PyTorch Lightning.
    """

    def __init__(self,
                 name: Optional[str] = None,
                 save_dir: Optional[str] = None,
                 offline: bool = False,
                 id: Optional[str] = None,
                 anonymous: bool = False,
                 version: Optional[str] = None,
                 project: Optional[str] = None,
                 tags: Optional[List[str]] = None,
                 log_model: bool = False,
                 experiment=None,
                 entity=None,
                 group: Optional[str] = None):
        if not _WANDB_AVAILABLE:
            raise ImportError('You want to use `wandb` logger which is not installed yet,'  # pragma: no-cover
                              ' install it with `pip install wandb`.')
        super().__init__()
        self._name = name
        self._save_dir = save_dir
        self._anonymous = 'allow' if anonymous else None
        self._id = version or id
        self._tags = tags
        self._project = project
        self._experiment = experiment
        self._offline = offline
        self._entity = entity
        self._log_model = log_model
        self._group = group

    def __getstate__(self):
        state = self.__dict__.copy()
        # args needed to reload correct experiment
        state['_id'] = self._experiment.id if self._experiment is not None else None
        # cannot be pickled
        state['_experiment'] = None
        return state

    @property
    def experiment(self) -> Run:
        r"""
        Actual wandb object. To use wandb features in your
        :class:`~pytorch_lightning.core.lightning.LightningModule` do the following.

        Example::

            self.logger.experiment.some_wandb_function()

        """
        if self._experiment is None:
            if self._offline:
                os.environ['WANDB_MODE'] = 'dryrun'
            self._experiment = wandb.init(
                name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous,
                reinit=True, id=self._id, resume='allow', tags=self._tags, entity=self._entity,
                group=self._group)
            # save checkpoints in wandb dir to upload on W&B servers
            if self._log_model:
                self.save_dir = self._experiment.dir
        return self._experiment

    def watch(self, model: nn.Module, log: str = 'gradients', log_freq: int = 100):
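        """Watch the model with ``wandb.watch``, logging gradients and/or parameters at the given frequency."""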
        self.experiment.watch(model, log=log, log_freq=log_freq)

    @rank_zero_only
    def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
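        """Record the given hyperparameters in the wandb run config."""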
        params = self._convert_params(params)
        self.experiment.config.update(params, allow_val_change=True)

    @rank_zero_only
    def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
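        """Log metrics to wandb; when ``step`` is provided it is logged alongside them as ``global_step``."""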
        self.experiment.log({'global_step': step, **metrics} if step is not None else metrics)

    @property
    def name(self) -> Optional[str]:
        # don't create an experiment if we don't have one
        name = self._experiment.project_name() if self._experiment else None
        return name

    @property
    def version(self) -> Optional[str]:
        # don't create an experiment if we don't have one
        return self._experiment.id if self._experiment else None
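

if __name__ == '__main__':  # pragma: no-cover
    # Minimal usage sketch: wire the logger into a Trainer and log a few values by hand.
    # Assumes `wandb` is installed; `offline=True` keeps the run local, and the run and
    # project names below are placeholders, not values used anywhere in this module.
    from pytorch_lightning import Trainer

    demo_logger = WandbLogger(name='demo-run', project='demo-project', offline=True)
    demo_logger.log_hyperparams({'lr': 1e-3, 'batch_size': 32})
    demo_logger.log_metrics({'train_loss': 0.42}, step=0)
    trainer = Trainer(logger=demo_logger)  # pass a LightningModule to trainer.fit(...) to train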