From d4af04107b85d4dbf58d19925c2b757406526948 Mon Sep 17 00:00:00 2001
From: ravitezu
Date: Tue, 6 Oct 2020 09:20:33 +0530
Subject: [PATCH 1/3] setup typing for contribute/engine part of the code

---
 ignite/contrib/engines/common.py | 212 ++++++++++++++++++++-----------
 ignite/contrib/engines/tbptt.py  |  16 ++-
 2 files changed, 151 insertions(+), 77 deletions(-)

diff --git a/ignite/contrib/engines/common.py b/ignite/contrib/engines/common.py
index c8fed964993a..9569444afcae 100644
--- a/ignite/contrib/engines/common.py
+++ b/ignite/contrib/engines/common.py
@@ -1,10 +1,12 @@
 import numbers
 import warnings
-from collections.abc import Mapping, Sequence
 from functools import partial
+from typing import Any, Callable, Dict, Iterable, Mapping, Optional, Sequence, Union
 
 import torch
 import torch.nn as nn
+from torch.optim.lr_scheduler import _LRScheduler
+from torch.optim.optimizer import Optimizer
 from torch.utils.data.distributed import DistributedSampler
 
 import ignite.distributed as idist
@@ -20,29 +22,32 @@
     WandBLogger,
     global_step_from_engine,
 )
+from ignite.contrib.handlers.base_logger import BaseLogger
+from ignite.contrib.handlers.param_scheduler import ParamScheduler
 from ignite.contrib.metrics import GpuInfo
 from ignite.engine import Engine, Events
 from ignite.handlers import Checkpoint, DiskSaver, EarlyStopping, TerminateOnNan
+from ignite.handlers.checkpoint import BaseSaveHandler
 from ignite.metrics import RunningAverage
 
 
 def setup_common_training_handlers(
-    trainer,
-    train_sampler=None,
-    to_save=None,
-    save_every_iters=1000,
-    output_path=None,
-    lr_scheduler=None,
-    with_gpu_stats=False,
-    output_names=None,
-    with_pbars=True,
-    with_pbar_on_iters=True,
-    log_every_iters=100,
-    device=None,
-    stop_on_nan=True,
-    clear_cuda_cache=True,
-    save_handler=None,
-    **kwargs
+    trainer: Engine,
+    train_sampler: Optional[DistributedSampler] = None,
+    to_save: Optional[Mapping] = None,
+    save_every_iters: int = 1000,
+    output_path: Optional[str] = None,
+    lr_scheduler: Optional[Union[ParamScheduler, _LRScheduler]] = None,
+    with_gpu_stats: bool = False,
+    output_names: Optional[Iterable[str]] = None,
+    with_pbars: bool = True,
+    with_pbar_on_iters: bool = True,
+    log_every_iters: int = 100,
+    device: Optional[Union[str, torch.device]] = None,
+    stop_on_nan: bool = True,
+    clear_cuda_cache: bool = True,
+    save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
+    **kwargs: Any
 ):
     """Helper method to setup trainer with common handlers (it also supports distributed configuration):
         - :class:`~ignite.handlers.TerminateOnNan`
@@ -119,20 +124,20 @@ class to use to store ``to_save``.
         See :class:`~ignite.handlers.checkpoint.Check
 def _setup_common_training_handlers(
-    trainer,
-    to_save=None,
-    save_every_iters=1000,
-    output_path=None,
-    lr_scheduler=None,
-    with_gpu_stats=False,
-    output_names=None,
-    with_pbars=True,
-    with_pbar_on_iters=True,
-    log_every_iters=100,
-    stop_on_nan=True,
-    clear_cuda_cache=True,
-    save_handler=None,
-    **kwargs
+    trainer: Engine,
+    to_save: Optional[Mapping] = None,
+    save_every_iters: int = 1000,
+    output_path: Optional[str] = None,
+    lr_scheduler: Optional[Union[ParamScheduler, _LRScheduler]] = None,
+    with_gpu_stats: bool = False,
+    output_names: Optional[Iterable[str]] = None,
+    with_pbars: bool = True,
+    with_pbar_on_iters: bool = True,
+    log_every_iters: int = 100,
+    stop_on_nan: bool = True,
+    clear_cuda_cache: bool = True,
+    save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
+    **kwargs: Any
 ):
     if output_path is not None and save_handler is not None:
         raise ValueError(
@@ -200,21 +205,21 @@ def output_transform(x, index, name):
 
 
 def _setup_common_distrib_training_handlers(
-    trainer,
-    train_sampler=None,
-    to_save=None,
-    save_every_iters=1000,
-    output_path=None,
-    lr_scheduler=None,
-    with_gpu_stats=False,
-    output_names=None,
-    with_pbars=True,
-    with_pbar_on_iters=True,
-    log_every_iters=100,
-    stop_on_nan=True,
-    clear_cuda_cache=True,
-    save_handler=None,
-    **kwargs
+    trainer: Engine,
+    train_sampler: Optional[DistributedSampler] = None,
+    to_save: Optional[Mapping] = None,
+    save_every_iters: int = 1000,
+    output_path: Optional[str] = None,
+    lr_scheduler: Optional[Union[ParamScheduler, _LRScheduler]] = None,
+    with_gpu_stats: bool = False,
+    output_names: Optional[Iterable[str]] = None,
+    with_pbars: bool = True,
+    with_pbar_on_iters: bool = True,
+    log_every_iters: int = 100,
+    stop_on_nan: bool = True,
+    clear_cuda_cache: bool = True,
+    save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
+    **kwargs: Any
 ):
 
     _setup_common_training_handlers(
@@ -257,10 +262,14 @@ def setup_any_logging(logger, logger_module, trainer, optimizers, evaluators, lo
     )
 
 
-def _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters):
+def _setup_logging(
+    logger: BaseLogger,
+    trainer: Engine,
+    optimizers: Union[Optimizer, Dict[str, Optimizer]],
+    evaluators: Union[Engine, Dict[str, Engine]],
+    log_every_iters: int,
+):
     if optimizers is not None:
-        from torch.optim.optimizer import Optimizer
-
         if not isinstance(optimizers, (Optimizer, Mapping)):
             raise TypeError("Argument optimizers should be either a single optimizer or a dictionary or optimizers")
 
@@ -298,7 +307,14 @@ def _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters):
     )
 
 
-def setup_tb_logging(output_path, trainer, optimizers=None, evaluators=None, log_every_iters=100, **kwargs):
+def setup_tb_logging(
+    output_path: str,
+    trainer: Engine,
+    optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
+    evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
+    log_every_iters: int = 100,
+    **kwargs: Any
+):
     """Method to setup TensorBoard logging on trainer and a list of evaluators. Logged metrics are:
         - Training metrics, e.g. running average loss values
        - Learning rate(s)
@@ -316,14 +332,20 @@ def setup_tb_logging(output_path, trainer, optimizers=None, evaluators=None, log
         **kwargs: optional keyword args to be passed to construct the logger.
 
     Returns:
-        :class:`~ignite.contrib.handlers.tensorboard_logger.TensorboardLogger`
+        TensorboardLogger
     """
     logger = TensorboardLogger(log_dir=output_path, **kwargs)
     _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
     return logger
 
 
-def setup_visdom_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100, **kwargs):
+def setup_visdom_logging(
+    trainer: Engine,
+    optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
+    evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
+    log_every_iters: int = 100,
+    **kwargs: Any
+):
     """Method to setup Visdom logging on trainer and a list of evaluators. Logged metrics are:
         - Training metrics, e.g. running average loss values
        - Learning rate(s)
@@ -340,14 +362,20 @@ def setup_visdom_logging(trainer, optimizers=None, evaluators=None, log_every_it
         **kwargs: optional keyword args to be passed to construct the logger.
 
     Returns:
-        :class:`~ignite.contrib.handlers.visdom_logger.VisdomLogger`
+        VisdomLogger
     """
     logger = VisdomLogger(**kwargs)
     _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
     return logger
 
 
-def setup_mlflow_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100, **kwargs):
+def setup_mlflow_logging(
+    trainer: Engine,
+    optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
+    evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
+    log_every_iters: int = 100,
+    **kwargs: Any
+):
     """Method to setup MLflow logging on trainer and a list of evaluators. Logged metrics are:
         - Training metrics, e.g. running average loss values
        - Learning rate(s)
@@ -364,14 +392,20 @@ def setup_mlflow_logging(trainer, optimizers=None, evaluators=None, log_every_it
         **kwargs: optional keyword args to be passed to construct the logger.
 
     Returns:
-        :class:`~ignite.contrib.handlers.mlflow_logger.MLflowLogger`
+        MLflowLogger
     """
     logger = MLflowLogger(**kwargs)
     _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
     return logger
 
 
-def setup_neptune_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100, **kwargs):
+def setup_neptune_logging(
+    trainer: Engine,
+    optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
+    evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
+    log_every_iters: int = 100,
+    **kwargs: Any
+):
     """Method to setup Neptune logging on trainer and a list of evaluators. Logged metrics are:
        - Training metrics, e.g. running average loss values
        - Learning rate(s)
@@ -388,14 +422,20 @@ def setup_neptune_logging(trainer, optimizers=None, evaluators=None, log_every_i
         **kwargs: optional keyword args to be passed to construct the logger.
 
     Returns:
-        :class:`~ignite.contrib.handlers.neptune_logger.NeptuneLogger`
+        NeptuneLogger
     """
     logger = NeptuneLogger(**kwargs)
     _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
     return logger
 
 
-def setup_wandb_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100, **kwargs):
+def setup_wandb_logging(
+    trainer: Engine,
+    optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
+    evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
+    log_every_iters: int = 100,
+    **kwargs: Any
+):
     """Method to setup WandB logging on trainer and a list of evaluators. Logged metrics are:
        - Training metrics, e.g. running average loss values
        - Learning rate(s)
@@ -412,14 +452,20 @@ def setup_wandb_logging(trainer, optimizers=None, evaluators=None, log_every_ite
         **kwargs: optional keyword args to be passed to construct the logger.
 
     Returns:
-        :class:`~ignite.contrib.handlers.wandb_logger.WandBLogger`
+        WandBLogger
     """
     logger = WandBLogger(**kwargs)
     _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
     return logger
 
 
-def setup_plx_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100, **kwargs):
+def setup_plx_logging(
+    trainer: Engine,
+    optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
+    evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
+    log_every_iters: int = 100,
+    **kwargs: Any
+):
     """Method to setup Polyaxon logging on trainer and a list of evaluators. Logged metrics are:
        - Training metrics, e.g. running average loss values
        - Learning rate(s)
@@ -436,14 +482,20 @@ def setup_plx_logging(trainer, optimizers=None, evaluators=None, log_every_iters
         **kwargs: optional keyword args to be passed to construct the logger.
 
     Returns:
-        :class:`~ignite.contrib.handlers.polyaxon_logger.PolyaxonLogger`
+        PolyaxonLogger
     """
     logger = PolyaxonLogger(**kwargs)
     _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
     return logger
 
 
-def setup_trains_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100, **kwargs):
+def setup_trains_logging(
+    trainer: Engine,
+    optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
+    evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
+    log_every_iters: int = 100,
+    **kwargs: Any
+):
     """Method to setup Trains logging on trainer and a list of evaluators. Logged metrics are:
        - Training metrics, e.g. running average loss values
        - Learning rate(s)
@@ -460,15 +512,15 @@ def setup_trains_logging(trainer, optimizers=None, evaluators=None, log_every_it
         **kwargs: optional keyword args to be passed to construct the logger.
 
     Returns:
-        :class:`~ignite.contrib.handlers.trains_logger.TrainsLogger`
+        TrainsLogger
     """
     logger = TrainsLogger(**kwargs)
     _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
     return logger
 
 
-def get_default_score_fn(metric_name):
-    def wrapper(engine):
+def get_default_score_fn(metric_name: str):
+    def wrapper(engine: Engine):
         score = engine.state.metrics[metric_name]
         return score
 
@@ -476,7 +528,14 @@ def wrapper(engine):
 
 
 def gen_save_best_models_by_val_score(
-    save_handler, evaluator, models, metric_name, n_saved=3, trainer=None, tag="val", **kwargs
+    save_handler: Union[Callable, BaseSaveHandler],
+    evaluator: Engine,
+    models: Union[torch.nn.Module, Dict[str, torch.nn.Module]],
+    metric_name: str,
+    n_saved: int = 3,
+    trainer: Optional[Engine] = None,
+    tag: str = "val",
+    **kwargs: Any
 ):
     """Method adds a handler to ``evaluator`` to save ``n_saved`` of best models based on the metric
     (named by ``metric_name``) provided by ``evaluator`` (i.e. ``evaluator.state.metrics[metric_name]``).
@@ -501,7 +560,7 @@ def gen_save_best_models_by_val_score(
         **kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`.
 
     Returns:
-        A :class:`~ignite.handlers.Checkpoint` handler.
+        A :class:`~ignite.handlers.checkpoint.Checkpoint` handler.
""" global_step_transform = None if trainer is not None: @@ -521,15 +580,20 @@ def gen_save_best_models_by_val_score( score_function=get_default_score_fn(metric_name), **kwargs, ) - evaluator.add_event_handler( - Events.COMPLETED, best_model_handler, - ) + evaluator.add_event_handler(Events.COMPLETED, best_model_handler) return best_model_handler def save_best_model_by_val_score( - output_path, evaluator, model, metric_name, n_saved=3, trainer=None, tag="val", **kwargs + output_path: str, + evaluator: Engine, + model: torch.nn.Module, + metric_name: str, + n_saved: int = 3, + trainer: Optional[Engine] = None, + tag: str = "val", + **kwargs: Any ): """Method adds a handler to ``evaluator`` to save on a disk ``n_saved`` of best models based on the metric (named by ``metric_name``) provided by ``evaluator`` (i.e. ``evaluator.state.metrics[metric_name]``). @@ -547,7 +611,7 @@ def save_best_model_by_val_score( **kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`. Returns: - A :class:`~ignite.handlers.Checkpoint` handler. + A :class:`~ignite.handlers.checkpoint.Checkpoint` handler. """ return gen_save_best_models_by_val_score( save_handler=DiskSaver(dirname=output_path, require_empty=False), @@ -561,7 +625,7 @@ def save_best_model_by_val_score( ) -def add_early_stopping_by_val_score(patience, evaluator, trainer, metric_name): +def add_early_stopping_by_val_score(patience: int, evaluator: Engine, trainer: Engine, metric_name: str): """Method setups early stopping handler based on the score (named by `metric_name`) provided by `evaluator`. Metric value should increase in order to keep training and not early stop. @@ -573,7 +637,7 @@ def add_early_stopping_by_val_score(patience, evaluator, trainer, metric_name): `evaluator.state.metrics`. Returns: - A :class:`~ignite.handlers.EarlyStopping` handler. + A :class:`~ignite.handlers.early_stopping.EarlyStopping` handler. """ es_handler = EarlyStopping(patience=patience, score_function=get_default_score_fn(metric_name), trainer=trainer) evaluator.add_event_handler(Events.COMPLETED, es_handler) diff --git a/ignite/contrib/engines/tbptt.py b/ignite/contrib/engines/tbptt.py index f959673c2b0c..d6ef72b26e43 100644 --- a/ignite/contrib/engines/tbptt.py +++ b/ignite/contrib/engines/tbptt.py @@ -1,6 +1,9 @@ # coding: utf-8 +from typing import Callable, Mapping, Optional, Sequence, Union import torch +import torch.nn as nn +from torch.optim.optimizer import Optimizer from ignite.engine import Engine, EventEnum, _prepare_batch from ignite.utils import apply_to_tensor @@ -17,7 +20,7 @@ class Tbptt_Events(EventEnum): TIME_ITERATION_COMPLETED = "time_iteration_completed" -def _detach_hidden(hidden): +def _detach_hidden(hidden: Union[torch.Tensor, Sequence, Mapping, str, bytes]): """Cut backpropagation graph. Auxillary function to cut the backpropagation graph by detaching the hidden @@ -27,7 +30,14 @@ def _detach_hidden(hidden): def create_supervised_tbptt_trainer( - model, optimizer, loss_fn, tbtt_step, dim=0, device=None, non_blocking=False, prepare_batch=_prepare_batch + model: nn.Module, + optimizer: Optimizer, + loss_fn: nn.Module, + tbtt_step: int, + dim: int = 0, + device: Optional[str] = None, + non_blocking: bool = False, + prepare_batch: Callable = _prepare_batch, ): """Create a trainer for truncated backprop through time supervised models. 
@@ -73,7 +83,7 @@ def create_supervised_tbptt_trainer(
 
     """
 
-    def _update(engine, batch):
+    def _update(engine: Engine, batch: Sequence[torch.Tensor]):
         loss_list = []
         hidden = None

From 4afd0f12251d57e7064b53eb204417875f1121aa Mon Sep 17 00:00:00 2001
From: ravitezu
Date: Tue, 6 Oct 2020 20:49:14 +0530
Subject: [PATCH 2/3] revert doc string changes

---
 ignite/contrib/engines/common.py | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/ignite/contrib/engines/common.py b/ignite/contrib/engines/common.py
index 9569444afcae..9dcaef20b769 100644
--- a/ignite/contrib/engines/common.py
+++ b/ignite/contrib/engines/common.py
@@ -332,7 +332,7 @@ def setup_tb_logging(
         **kwargs: optional keyword args to be passed to construct the logger.
 
     Returns:
-        TensorboardLogger
+        :class:`~ignite.contrib.handlers.tensorboard_logger.TensorboardLogger`
     """
     logger = TensorboardLogger(log_dir=output_path, **kwargs)
     _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
@@ -362,7 +362,7 @@ def setup_visdom_logging(
         **kwargs: optional keyword args to be passed to construct the logger.
 
     Returns:
-        VisdomLogger
+        :class:`~ignite.contrib.handlers.visdom_logger.VisdomLogger`
     """
     logger = VisdomLogger(**kwargs)
     _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
@@ -392,7 +392,7 @@ def setup_mlflow_logging(
         **kwargs: optional keyword args to be passed to construct the logger.
 
     Returns:
-        MLflowLogger
+        :class:`~ignite.contrib.handlers.mlflow_logger.MLflowLogger`
     """
     logger = MLflowLogger(**kwargs)
     _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
@@ -422,7 +422,7 @@ def setup_neptune_logging(
         **kwargs: optional keyword args to be passed to construct the logger.
 
     Returns:
-        NeptuneLogger
+        :class:`~ignite.contrib.handlers.neptune_logger.NeptuneLogger`
     """
     logger = NeptuneLogger(**kwargs)
     _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
@@ -452,7 +452,7 @@ def setup_wandb_logging(
         **kwargs: optional keyword args to be passed to construct the logger.
 
     Returns:
-        WandBLogger
+        :class:`~ignite.contrib.handlers.wandb_logger.WandBLogger`
     """
     logger = WandBLogger(**kwargs)
     _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
@@ -482,7 +482,7 @@ def setup_plx_logging(
         **kwargs: optional keyword args to be passed to construct the logger.
 
     Returns:
-        PolyaxonLogger
+        :class:`~ignite.contrib.handlers.polyaxon_logger.PolyaxonLogger`
     """
     logger = PolyaxonLogger(**kwargs)
     _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
@@ -512,7 +512,7 @@ def setup_trains_logging(
         **kwargs: optional keyword args to be passed to construct the logger.
 
     Returns:
-        TrainsLogger
+        :class:`~ignite.contrib.handlers.trains_logger.TrainsLogger`
     """
     logger = TrainsLogger(**kwargs)
     _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)
@@ -560,7 +560,7 @@ def gen_save_best_models_by_val_score(
         **kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`.
 
     Returns:
-        A :class:`~ignite.handlers.checkpoint.Checkpoint` handler.
+        A :class:`~ignite.handlers.Checkpoint` handler.
     """
     global_step_transform = None
     if trainer is not None:
@@ -611,7 +611,7 @@ def save_best_model_by_val_score(
         **kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`.
 
     Returns:
-        A :class:`~ignite.handlers.checkpoint.Checkpoint` handler.
+        A :class:`~ignite.handlers.Checkpoint` handler.
""" return gen_save_best_models_by_val_score( save_handler=DiskSaver(dirname=output_path, require_empty=False), @@ -637,7 +637,7 @@ def add_early_stopping_by_val_score(patience: int, evaluator: Engine, trainer: E `evaluator.state.metrics`. Returns: - A :class:`~ignite.handlers.early_stopping.EarlyStopping` handler. + A :class:`~ignite.handlers.EarlyStopping` handler. """ es_handler = EarlyStopping(patience=patience, score_function=get_default_score_fn(metric_name), trainer=trainer) evaluator.add_event_handler(Events.COMPLETED, es_handler) From 72981bcc8a4fb4bd1b55185eeee2c39963693f2a Mon Sep 17 00:00:00 2001 From: vfdev Date: Tue, 6 Oct 2020 21:42:17 +0200 Subject: [PATCH 3/3] Update common.py --- ignite/contrib/engines/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ignite/contrib/engines/common.py b/ignite/contrib/engines/common.py index 9dcaef20b769..ed7169fd5b3c 100644 --- a/ignite/contrib/engines/common.py +++ b/ignite/contrib/engines/common.py @@ -362,7 +362,7 @@ def setup_visdom_logging( **kwargs: optional keyword args to be passed to construct the logger. Returns: - :class:`~ignite.contrib.handlers.visdom_logger.VisdomLogger` + :class:`~ignite.contrib.handlers.visdom_logger.VisdomLogger` """ logger = VisdomLogger(**kwargs) _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters)