[FIX] Allow 0 to be passed as a config to print,write,plot,checkpoint_every and disable in that case #511

Merged · 7 commits · Aug 9, 2024
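In short, this change lets any of the periodic actions configured on TrainConfig (metric printing, tracker writes, plotting, checkpointing) be switched off by passing 0 as the corresponding interval. A minimal usage sketch, assuming TrainConfig is imported from qadence.ml_tools as in the files below; the chosen values are only illustrative:

from qadence.ml_tools import TrainConfig

# Illustrative settings: print metrics every 100 iterations, but disable
# tracker writes, figure plotting and checkpointing entirely by passing 0.
config = TrainConfig(
    max_iter=1000,
    print_every=100,
    write_every=0,        # 0 disables write_tracker calls
    plot_every=0,         # 0 disables plot_tracker calls
    checkpoint_every=0,   # 0 disables write_checkpoint calls
)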
20 changes: 16 additions & 4 deletions qadence/ml_tools/config.py
@@ -45,13 +45,25 @@ class TrainConfig:
max_iter: int = 10000
"""Number of training iterations."""
print_every: int = 1000
"""Print loss/metrics."""
"""Print loss/metrics.

Set to 0 to disable
"""
write_every: int = 50
"""Write loss and metrics with the tracking tool."""
"""Write loss and metrics with the tracking tool.

Set to 0 to disable
"""
checkpoint_every: int = 5000
"""Write model/optimizer checkpoint."""
"""Write model/optimizer checkpoint.

Set to 0 to disable
"""
plot_every: int = 5000
"""Write figures."""
"""Write figures.

Set to 0 to disable
"""
log_model: bool = False
"""Logs a serialised version of the model."""
folder: Path | None = None
24 changes: 13 additions & 11 deletions qadence/ml_tools/train_grad.py
@@ -5,13 +5,7 @@
from logging import getLogger
from typing import Callable, Union

-from rich.progress import (
-    BarColumn,
-    Progress,
-    TaskProgressColumn,
-    TextColumn,
-    TimeRemainingColumn,
-)
+from rich.progress import BarColumn, Progress, TaskProgressColumn, TextColumn, TimeRemainingColumn
from torch import complex128, float32, float64
from torch import device as torch_device
from torch import dtype as torch_dtype
@@ -231,18 +225,22 @@ def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, d
"You can use e.g. `qadence.ml_tools.to_dataloader` to build a dataloader."
)

-if iteration % config.print_every == 0 and config.verbose:
+if (
+    config.print_every > 0
+    and iteration % config.print_every == 0
+    and config.verbose
+):
# Note that the loss returned by optimize_step
# is the value before doing the training step
# which is printed accordingly by the previous iteration number
print_metrics(loss, metrics, iteration - 1)

-if iteration % config.write_every == 0:
+if config.write_every > 0 and iteration % config.write_every == 0:
write_tracker(
writer, loss, metrics, iteration, tracking_tool=config.tracking_tool
)

-if iteration % config.plot_every == 0:
+if config.plot_every > 0 and iteration % config.plot_every == 0:
plot_tracker(
writer,
model,
@@ -265,7 +263,11 @@ def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, d
)

if config.folder:
-if iteration % config.checkpoint_every == 0 and not config.checkpoint_best_only:
+if (
+    config.checkpoint_every > 0
+    and iteration % config.checkpoint_every == 0
+    and not config.checkpoint_best_only
+):
write_checkpoint(config.folder, model, optimizer, iteration)

except KeyboardInterrupt:
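Guarding with > 0 rather than special-casing inside the modulo check also avoids a ZeroDivisionError: Python's % raises when the right-hand side is 0, and the `and` short-circuits before the modulo is evaluated. A standalone sketch of the pattern used in the guards above, with an illustrative helper name that is not part of the diff:

def interval_reached(iteration: int, every: int) -> bool:
    # A non-positive interval disables the action; short-circuiting keeps
    # `iteration % every` from ever running with every == 0.
    return every > 0 and iteration % every == 0

assert interval_reached(100, 50)      # multiples of the interval trigger the action
assert not interval_reached(100, 0)   # an interval of 0 disables it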
16 changes: 5 additions & 11 deletions qadence/ml_tools/train_no_grad.py
@@ -6,13 +6,7 @@

import nevergrad as ng
from nevergrad.optimization.base import Optimizer as NGOptimizer
-from rich.progress import (
-    BarColumn,
-    Progress,
-    TaskProgressColumn,
-    TextColumn,
-    TimeRemainingColumn,
-)
+from rich.progress import BarColumn, Progress, TaskProgressColumn, TextColumn, TimeRemainingColumn
from torch import Tensor
from torch.nn import Module
from torch.utils.data import DataLoader
@@ -114,13 +108,13 @@ def _update_parameters(
else:
raise NotImplementedError("Unsupported dataloader type!")

-if iteration % config.print_every == 0 and config.verbose:
+if config.print_every > 0 and iteration % config.print_every == 0 and config.verbose:
print_metrics(loss, metrics, iteration)

-if iteration % config.write_every == 0:
+if config.write_every > 0 and iteration % config.write_every == 0:
write_tracker(writer, loss, metrics, iteration, tracking_tool=config.tracking_tool)

-if iteration % config.plot_every == 0:
+if config.plot_every > 0 and iteration % config.plot_every == 0:
plot_tracker(
writer,
model,
@@ -130,7 +124,7 @@ def _update_parameters(
)

if config.folder:
-if iteration % config.checkpoint_every == 0:
+if config.checkpoint_every > 0 and iteration % config.checkpoint_every == 0:
write_checkpoint(config.folder, model, optimizer, iteration)

if iteration >= init_iter + config.max_iter: