Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[feat] Model version control using W&B Artifacts #1137

Closed
wants to merge 19 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 15 additions & 2 deletions mmf/configs/defaults.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -45,11 +45,24 @@ training:
wandb:
# Whether to use Weights and Biases Logger, (Default: false)
enabled: false
# An entity is a username or team name where you're sending runs.
# This is necessary if you want to log your metrics to a team account. By default
# it will log the run to your user account.
entity: null
# Project name to be used while logging the experiment with wandb
wandb_projectname: mmf_${oc.env:USER,}
project: mmf
# Experiment/ run name to be used while logging the experiment
# under the project with wandb
wandb_runname: ${training.experiment_name}
name: ${training.experiment_name}
# You can save your model checkpoints as W&B Artifacts for model versioning.
# Set the value to `true` to enable this feature.
log_checkpoint: false
# Specify other argument values that you want to pass to wandb.init(). Check out the documentation
# at https://docs.wandb.ai/ref/python/init to see what arguments are available.
# job_type: 'train'
# tags: ['tag1', 'tag2']



# Size of the batch globally. If distributed or data_parallel
# is used, this will be divided equally among GPUs
Expand Down
8 changes: 4 additions & 4 deletions mmf/trainers/callbacks/logistics.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,11 +58,10 @@ def __init__(self, config, trainer):
if env_wandb_logdir:
log_dir = env_wandb_logdir

wandb_projectname = config.training.wandb.wandb_projectname
wandb_runname = config.training.wandb.wandb_runname

self.wandb_logger = WandbLogger(
name=wandb_runname, save_dir=log_dir, project=wandb_projectname
entity=config.training.wandb.entity,
config=config,
project=config.training.wandb.project,
)

def on_train_start(self):
Expand Down Expand Up @@ -153,6 +152,7 @@ def on_test_end(self, **kwargs):
meter=kwargs["meter"],
should_print=prefix,
tb_writer=self.tb_writer,
wandb_logger=self.wandb_logger,
)
logger.info(f"Finished run in {self.total_timer.get_time_since_start()}")

Expand Down
10 changes: 10 additions & 0 deletions mmf/utils/checkpoint.py
Original file line number Diff line number Diff line change
Expand Up @@ -522,6 +522,7 @@ def save(self, update, iteration=None, update_best=False):
best_metric = (
self.trainer.early_stop_callback.early_stopping.best_monitored_value
)

model = self.trainer.model
data_parallel = registry.get("data_parallel") or registry.get("distributed")
fp16_scaler = getattr(self.trainer, "scaler", None)
Expand Down Expand Up @@ -574,6 +575,15 @@ def save(self, update, iteration=None, update_best=False):
with open_if_main(current_ckpt_filepath, "wb") as f:
self.save_func(ckpt, f)

# Save the current checkpoint as W&B artifacts for model versioning.
if self.config.training.wandb.log_checkpoint:
logger.info(
"Saving current checkpoint as W&B Artifacts for model versioning"
)
self.trainer.logistics_callback.wandb_logger.log_model_checkpoint(
current_ckpt_filepath
)

# Remove old checkpoints if max_to_keep is set
# In XLA, only delete checkpoint files in main process
if self.max_to_keep > 0 and is_main():
Expand Down
54 changes: 40 additions & 14 deletions mmf/utils/logger.py
Original file line number Diff line number Diff line change
Expand Up @@ -225,6 +225,12 @@ def summarize_report(
if not is_main() and not is_xla():
return

# Log the learning rate if available
if wandb_logger and "lr" in extra:
wandb_logger.log_metrics(
{"train/learning_rate": float(extra["lr"])}, commit=False
)

if tb_writer:
scalar_dict = meter.get_scalar_dict()
tb_writer.add_scalars(scalar_dict, current_iteration)
Expand Down Expand Up @@ -395,21 +401,19 @@ class WandbLogger:
Log using `Weights and Biases`.

Args:
name: Display name for the run.
save_dir: Path where data is saved (./save/logs/wandb/ by default).
project: Display name for the project.
**init_kwargs: Arguments passed to :func:`wandb.init`.
entity: An entity is a username or team name where you're sending runs.
config: Configuration for the run.
project: Name of the W&B project.

Raises:
ImportError: If wandb package is not installed.
"""

def __init__(
self,
name: Optional[str] = None,
save_dir: Optional[str] = None,
entity: Optional[str] = None,
config: Optional[Dict] = None,
project: Optional[str] = None,
**init_kwargs,
):
try:
import wandb
Expand All @@ -420,10 +424,13 @@ def __init__(
)

self._wandb = wandb

self._wandb_init = dict(name=name, project=project, dir=save_dir)

self._wandb_init.update(**init_kwargs)
self._wandb_init = dict(entity=entity, config=config, project=project)
wandb_kwargs = dict(config.training.wandb)
wandb_kwargs.pop("enabled")
wandb_kwargs.pop("entity")
wandb_kwargs.pop("project")
wandb_kwargs.pop("log_checkpoint")
self._wandb_init.update(**wandb_kwargs)

self.setup()

Expand Down Expand Up @@ -453,14 +460,33 @@ def _should_log_wandb(self):
else:
return True

def log_metrics(self, metrics: Dict[str, float]):
def log_metrics(self, metrics: Dict[str, float], commit=True):
    """
    Send the monitored metrics to the wandb dashboard.

    Args:
        metrics (Dict[str, float]): A dictionary of metrics to log.
        commit (bool): Whether to persist the metrics dict to the wandb
            server and advance the step counter. (default: True)
    """
    # Skip silently when wandb logging is disabled or unavailable.
    if self._should_log_wandb():
        self._wandb.log(metrics, commit=commit)

def log_model_checkpoint(self, model_path):
    """
    Upload a model checkpoint file to the wandb dashboard as an Artifact.

    Args:
        model_path (str): Path to the checkpoint file on disk.
    """
    # Skip silently when wandb logging is disabled or unavailable.
    if not self._should_log_wandb():
        return

    # Artifact name is tied to the current run id so each run gets its
    # own versioned model lineage.
    artifact_name = f"run_{self._wandb.run.id}_model"
    checkpoint_artifact = self._wandb.Artifact(artifact_name, type="model")
    checkpoint_artifact.add_file(model_path, name="current.pt")
    self._wandb.log_artifact(checkpoint_artifact, aliases=["latest"])
59 changes: 46 additions & 13 deletions website/docs/notes/logging.md
Original file line number Diff line number Diff line change
@@ -1,42 +1,75 @@
---
id: concepts
title: Terminology and Concepts
sidebar_label: Terminology and Concepts
id: logger
title: Weights and Biases Logging
sidebar_label: Weights and Biases Logging
---

## Weights and Biases Logger

MMF has a `WandbLogger` class which lets the user to log their model's progress using [Weights and Biases](https://gitbook-docs.wandb.ai/).
MMF now has a `WandbLogger` class which lets the user log their model's progress using [Weights and Biases](https://wandb.ai/site). Enable this logger to automatically log the training/validation metrics, system (GPU and CPU) metrics, and configuration parameters.

## First time setup

To set up wandb, run the following:
```
pip install wandb
```
In order to log anything to the W&B server, you need to authenticate your machine with a W&B **API key**. You can create a new account by going to https://wandb.ai/signup, which will generate an API key. If you are an existing user, you can retrieve your key from https://wandb.ai/authorize. You only need to supply your key once; it is then remembered on the same device.

```
wandb login
```

## W&B config parameters

The following options are available in config to enable and customize the wandb logging:
```yaml
training:
# Weights and Biases control, by default Weights and Biases (wandb) is disabled
wandb:
# Whether to use Weights and Biases Logger, (Default: false)
enabled: false
enabled: true
# An entity is a username or team name where you're sending runs.
# This is necessary if you want to log your metrics to a team account. By default
# it will log the run to your user account.
entity: null
# Project name to be used while logging the experiment with wandb
wandb_projectname: mmf_${oc.env:USER}
project: mmf
# Experiment/ run name to be used while logging the experiment
# under the project with wandb
wandb_runname: ${training.experiment_name}
name: ${training.experiment_name}
# Specify other argument values that you want to pass to wandb.init(). Check out the documentation
# at https://docs.wandb.ai/ref/python/init to see what arguments are available.
# job_type: 'train'
# tags: ['tag1', 'tag2']
env:
wandb_logdir: ${env:MMF_WANDB_LOGDIR,}
```
To enable wandb logger the user needs to change the following option in the config.

`training.wandb.enabled=True`
* To enable the wandb logger, the user needs to change the following option in the config.

`training.wandb.enabled=True`

* To set the `entity`, which is the name of the team or the username, the user needs to change the following option in the config. If no `entity` is provided, the run will be logged to the `entity` set as default in the user's settings.

`training.wandb.entity=<teamname/username>`

* To give the current experiment a project name and a run name, the user should add these config options. The default project name is `mmf` and the default run name is `${training.experiment_name}`.

`training.wandb.project=<ProjectName>` <br />
`training.wandb.name=<RunName>`

* To change the path to the directory where wandb metadata would be stored (Default: `env.log_dir`):

`env.wandb_logdir=<dir_name>`

To give the current experiment a project and run name, user should add these config options.
* To provide extra arguments to `wandb.init()`, the user just needs to define them in the config file. Check out the documentation at https://docs.wandb.ai/ref/python/init to see what arguments are available. An example is shown in the config parameter shown above. Make sure to use the same key name in the config file as defined in the documentation.

`training.wandb.wandb_projectname=<ProjectName> training.wandb.wandb_runname=<RunName>`
## Current features

To change the path to the directory where wandb metadata would be stored (Default: `env.log_dir`):
The following features are currently supported by the `WandbLogger`:

`env.wandb_logdir=<dir_name>`
* Training & Validation metrics
* Learning Rate over time
* GPU: Type, GPU Utilization, power, temperature, CUDA memory usage
* Log configuration parameters