From 35ed512a2cee3556a83aa2f627e8cd40dedaad2b Mon Sep 17 00:00:00 2001
From: Rostyslav Zatserkovnyi
Date: Wed, 28 Oct 2020 18:03:20 +0200
Subject: [PATCH 1/4] Activate mypy in ignite.contrib.engines

---
 ignite/contrib/engines/common.py | 81 ++++++++++++++++++++------------
 mypy.ini                         |  4 --
 2 files changed, 52 insertions(+), 33 deletions(-)

diff --git a/ignite/contrib/engines/common.py b/ignite/contrib/engines/common.py
index 88c637545c7a..cffe8c9e1eeb 100644
--- a/ignite/contrib/engines/common.py
+++ b/ignite/contrib/engines/common.py
@@ -1,7 +1,7 @@
 import numbers
 import warnings
 from functools import partial
-from typing import Any, Callable, Dict, Iterable, Mapping, Optional, Sequence, Union
+from typing import Any, Callable, Dict, Iterable, Mapping, Optional, Sequence, Union, cast
 
 import torch
 import torch.nn as nn
@@ -88,24 +88,24 @@ class to use to store ``to_save``. See :class:`~ignite.handlers.checkpoint.Check
         **kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`.
     """
-    _kwargs = dict(
-        to_save=to_save,
-        save_every_iters=save_every_iters,
-        output_path=output_path,
-        lr_scheduler=lr_scheduler,
-        with_gpu_stats=with_gpu_stats,
-        output_names=output_names,
-        with_pbars=with_pbars,
-        with_pbar_on_iters=with_pbar_on_iters,
-        log_every_iters=log_every_iters,
-        stop_on_nan=stop_on_nan,
-        clear_cuda_cache=clear_cuda_cache,
-        save_handler=save_handler,
-    )
-    _kwargs.update(kwargs)
-
     if idist.get_world_size() > 1:
-        _setup_common_distrib_training_handlers(trainer, train_sampler=train_sampler, **_kwargs)
+        _setup_common_distrib_training_handlers(
+            trainer,
+            train_sampler=train_sampler,
+            to_save=to_save,
+            save_every_iters=save_every_iters,
+            output_path=output_path,
+            lr_scheduler=lr_scheduler,
+            with_gpu_stats=with_gpu_stats,
+            output_names=output_names,
+            with_pbars=with_pbars,
+            with_pbar_on_iters=with_pbar_on_iters,
+            log_every_iters=log_every_iters,
+            stop_on_nan=stop_on_nan,
+            clear_cuda_cache=clear_cuda_cache,
+            save_handler=save_handler,
+            **kwargs,
+        )
     else:
         if train_sampler is not None and isinstance(train_sampler, DistributedSampler):
             warnings.warn(
@@ -114,7 +114,22 @@ class to use to store ``to_save``. See :class:`~ignite.handlers.checkpoint.Check
                 "Train sampler argument will be ignored",
                 UserWarning,
             )
-        _setup_common_training_handlers(trainer, **_kwargs)
+        _setup_common_training_handlers(
+            trainer,
+            to_save=to_save,
+            save_every_iters=save_every_iters,
+            output_path=output_path,
+            lr_scheduler=lr_scheduler,
+            with_gpu_stats=with_gpu_stats,
+            output_names=output_names,
+            with_pbars=with_pbars,
+            with_pbar_on_iters=with_pbar_on_iters,
+            log_every_iters=log_every_iters,
+            stop_on_nan=stop_on_nan,
+            clear_cuda_cache=clear_cuda_cache,
+            save_handler=save_handler,
+            **kwargs,
+        )
 
 
 setup_common_distrib_training_handlers = setup_common_training_handlers
@@ -146,7 +161,10 @@ def _setup_common_training_handlers(
 
     if lr_scheduler is not None:
         if isinstance(lr_scheduler, torch.optim.lr_scheduler._LRScheduler):
-            trainer.add_event_handler(Events.ITERATION_COMPLETED, lambda engine: lr_scheduler.step())
+            trainer.add_event_handler(
+                Events.ITERATION_COMPLETED,
+                lambda engine: cast(torch.optim.lr_scheduler._LRScheduler, lr_scheduler).step(),
+            )
         elif isinstance(lr_scheduler, LRScheduler):
             trainer.add_event_handler(Events.ITERATION_COMPLETED, lr_scheduler)
         else:
@@ -164,11 +182,15 @@ def _setup_common_training_handlers(
 
         if output_path is not None:
             save_handler = DiskSaver(dirname=output_path, require_empty=False)
 
-        checkpoint_handler = Checkpoint(to_save, save_handler, filename_prefix="training", **kwargs)
-        trainer.add_event_handler(Events.ITERATION_COMPLETED(every=save_every_iters), checkpoint_handler)
+        checkpoint_handler = Checkpoint(
+            to_save, cast(Union[Callable, BaseSaveHandler], save_handler), filename_prefix="training", **kwargs
+        )
+        trainer.add_event_handler(cast(Events, Events.ITERATION_COMPLETED(every=save_every_iters)), checkpoint_handler)
 
     if with_gpu_stats:
-        GpuInfo().attach(trainer, name="gpu", event_name=Events.ITERATION_COMPLETED(every=log_every_iters))
+        GpuInfo().attach(
+            trainer, name="gpu", event_name=cast(Events, Events.ITERATION_COMPLETED(every=log_every_iters))
+        )
 
     if output_names is not None:
@@ -193,7 +215,7 @@ def output_transform(x, index, name):
     if with_pbars:
         if with_pbar_on_iters:
             ProgressBar(persist=False).attach(
-                trainer, metric_names="all", event_name=Events.ITERATION_COMPLETED(every=log_every_iters)
+                trainer, metric_names="all", event_name=cast(Events, Events.ITERATION_COMPLETED(every=log_every_iters))
             )
 
         ProgressBar(persist=True, bar_format="").attach(
@@ -262,8 +284,8 @@ def setup_any_logging(logger, logger_module, trainer, optimizers, evaluators, lo
 def _setup_logging(
     logger: BaseLogger,
     trainer: Engine,
-    optimizers: Union[Optimizer, Dict[str, Optimizer]],
-    evaluators: Union[Engine, Dict[str, Engine]],
+    optimizers: Union[Optimizer, Dict[str, Optimizer], None],
+    evaluators: Union[Engine, Dict[str, Engine], None],
     log_every_iters: int,
 ):
     if optimizers is not None:
@@ -284,11 +306,12 @@ def _setup_logging(
     if optimizers is not None:
         # Log optimizer parameters
         if isinstance(optimizers, Optimizer):
-            optimizers = {None: optimizers}
+            optimizers = {"": optimizers}
 
         for k, optimizer in optimizers.items():
+            tag = None if k == "" else k
             logger.attach_opt_params_handler(
-                trainer, Events.ITERATION_STARTED(every=log_every_iters), optimizer, param_name="lr", tag=k
+                trainer, Events.ITERATION_STARTED(every=log_every_iters), optimizer, param_name="lr", tag=tag
             )
 
     if evaluators is not None:
@@ -575,7 +598,7 @@ def gen_save_best_models_by_val_score(
         to_save = {"model": models}
 
     best_model_handler = Checkpoint(
-        to_save,
+        cast(Dict[str, torch.nn.Module], to_save),
         save_handler,
         filename_prefix="best",
         n_saved=n_saved,
diff --git a/mypy.ini b/mypy.ini
index bd790c5ecc8c..62b36053b343 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -28,10 +28,6 @@ warn_unused_ignores = True
 
 ignore_errors = True
 
-[mypy-ignite.contrib.engines.*]
-
-ignore_errors = True
-
 [mypy-horovod.*]
 
 ignore_missing_imports = True

From e54fd5affb87c96c0ab9906a902e4769904805d3 Mon Sep 17 00:00:00 2001
From: Rostyslav Zatserkovnyi
Date: Thu, 29 Oct 2020 09:26:00 +0200
Subject: [PATCH 2/4] Fix review comments

---
 ignite/contrib/engines/common.py | 61 +++++++++++++++++---------------
 ignite/contrib/engines/tbptt.py  |  7 ++--
 2 files changed, 37 insertions(+), 31 deletions(-)

diff --git a/ignite/contrib/engines/common.py b/ignite/contrib/engines/common.py
index cffe8c9e1eeb..92fef52e4057 100644
--- a/ignite/contrib/engines/common.py
+++ b/ignite/contrib/engines/common.py
@@ -47,7 +47,7 @@ def setup_common_training_handlers(
     clear_cuda_cache: bool = True,
     save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
     **kwargs: Any
-):
+) -> None:
     """Helper method to setup trainer with common handlers (it also supports distributed configuration):
 
         - :class:`~ignite.handlers.TerminateOnNan`
@@ -150,7 +150,7 @@ def _setup_common_training_handlers(
     clear_cuda_cache: bool = True,
     save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
     **kwargs: Any
-):
+) -> None:
     if output_path is not None and save_handler is not None:
         raise ValueError(
             "Arguments output_path and save_handler are mutually exclusive. Please, define only one of them"
@@ -162,8 +162,7 @@ def _setup_common_training_handlers(
     if lr_scheduler is not None:
         if isinstance(lr_scheduler, torch.optim.lr_scheduler._LRScheduler):
             trainer.add_event_handler(
-                Events.ITERATION_COMPLETED,
-                lambda engine: cast(torch.optim.lr_scheduler._LRScheduler, lr_scheduler).step(),
+                Events.ITERATION_COMPLETED, lambda engine: cast(_LRScheduler, lr_scheduler).step()
             )
         elif isinstance(lr_scheduler, LRScheduler):
             trainer.add_event_handler(Events.ITERATION_COMPLETED, lr_scheduler)
@@ -185,11 +184,13 @@ def _setup_common_training_handlers(
         checkpoint_handler = Checkpoint(
             to_save, cast(Union[Callable, BaseSaveHandler], save_handler), filename_prefix="training", **kwargs
         )
-        trainer.add_event_handler(cast(Events, Events.ITERATION_COMPLETED(every=save_every_iters)), checkpoint_handler)
+        trainer.add_event_handler(
+            Events.ITERATION_COMPLETED(every=save_every_iters), checkpoint_handler
+        )  # type: ignore[arg-type]
 
     if with_gpu_stats:
         GpuInfo().attach(
-            trainer, name="gpu", event_name=cast(Events, Events.ITERATION_COMPLETED(every=log_every_iters))
+            trainer, name="gpu", event_name=Events.ITERATION_COMPLETED(every=log_every_iters)  # type: ignore[arg-type]
         )
 
     if output_names is not None:
@@ -239,7 +240,7 @@ def _setup_common_distrib_training_handlers(
     clear_cuda_cache: bool = True,
     save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
     **kwargs: Any
-):
+) -> None:
 
     _setup_common_training_handlers(
         trainer,
@@ -267,14 +268,14 @@ def distrib_set_epoch(engine):
         train_sampler.set_epoch(engine.state.epoch - 1)
 
 
-def empty_cuda_cache(_):
+def empty_cuda_cache(_) -> None:
     torch.cuda.empty_cache()
 
     import gc
 
     gc.collect()
 
 
-def setup_any_logging(logger, logger_module, trainer, optimizers, evaluators, log_every_iters):
+def setup_any_logging(logger, logger_module, trainer, optimizers, evaluators, log_every_iters) -> None:
     raise DeprecationWarning(
         "ignite.contrib.engines.common.setup_any_logging is deprecated since 0.4.0. and will be remove in 0.6.0. "
" "Please use instead: setup_tb_logging, setup_visdom_logging or setup_mlflow_logging etc." @@ -284,10 +285,10 @@ def setup_any_logging(logger, logger_module, trainer, optimizers, evaluators, lo def _setup_logging( logger: BaseLogger, trainer: Engine, - optimizers: Union[Optimizer, Dict[str, Optimizer], None], - evaluators: Union[Engine, Dict[str, Engine], None], + optimizers: Optional[Union[Optimizer, Dict[str, Optimizer], Dict[None, Optimizer]]], + evaluators: Optional[Union[Engine, Dict[str, Engine]]], log_every_iters: int, -): +) -> None: if optimizers is not None: if not isinstance(optimizers, (Optimizer, Mapping)): raise TypeError("Argument optimizers should be either a single optimizer or a dictionary or optimizers") @@ -306,12 +307,11 @@ def _setup_logging( if optimizers is not None: # Log optimizer parameters if isinstance(optimizers, Optimizer): - optimizers = {"": optimizers} + optimizers = {None: optimizers} for k, optimizer in optimizers.items(): - tag = None if k == "" else k logger.attach_opt_params_handler( - trainer, Events.ITERATION_STARTED(every=log_every_iters), optimizer, param_name="lr", tag=tag + trainer, Events.ITERATION_STARTED(every=log_every_iters), optimizer, param_name="lr", tag=k ) if evaluators is not None: @@ -334,7 +334,7 @@ def setup_tb_logging( evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None, log_every_iters: int = 100, **kwargs: Any -): +) -> TensorboardLogger: """Method to setup TensorBoard logging on trainer and a list of evaluators. Logged metrics are: - Training metrics, e.g. running average loss values @@ -366,7 +366,7 @@ def setup_visdom_logging( evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None, log_every_iters: int = 100, **kwargs: Any -): +) -> VisdomLogger: """Method to setup Visdom logging on trainer and a list of evaluators. Logged metrics are: - Training metrics, e.g. running average loss values @@ -397,7 +397,7 @@ def setup_mlflow_logging( evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None, log_every_iters: int = 100, **kwargs: Any -): +) -> MLflowLogger: """Method to setup MLflow logging on trainer and a list of evaluators. Logged metrics are: - Training metrics, e.g. running average loss values @@ -428,7 +428,7 @@ def setup_neptune_logging( evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None, log_every_iters: int = 100, **kwargs: Any -): +) -> NeptuneLogger: """Method to setup Neptune logging on trainer and a list of evaluators. Logged metrics are: - Training metrics, e.g. running average loss values @@ -459,7 +459,7 @@ def setup_wandb_logging( evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None, log_every_iters: int = 100, **kwargs: Any -): +) -> WandBLogger: """Method to setup WandB logging on trainer and a list of evaluators. Logged metrics are: - Training metrics, e.g. running average loss values @@ -490,7 +490,7 @@ def setup_plx_logging( evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None, log_every_iters: int = 100, **kwargs: Any -): +) -> PolyaxonLogger: """Method to setup Polyaxon logging on trainer and a list of evaluators. Logged metrics are: - Training metrics, e.g. running average loss values @@ -521,7 +521,7 @@ def setup_trains_logging( evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None, log_every_iters: int = 100, **kwargs: Any -): +) -> TrainsLogger: """Method to setup Trains logging on trainer and a list of evaluators. Logged metrics are: - Training metrics, e.g. 
@@ -546,7 +546,7 @@ def setup_trains_logging(
     return logger
 
 
-def get_default_score_fn(metric_name: str):
+def get_default_score_fn(metric_name: str) -> Any:
     def wrapper(engine: Engine):
         score = engine.state.metrics[metric_name]
         return score
@@ -563,7 +563,7 @@ def gen_save_best_models_by_val_score(
     trainer: Optional[Engine] = None,
     tag: str = "val",
     **kwargs: Any
-):
+) -> Checkpoint:
     """Method adds a handler to ``evaluator`` to save ``n_saved`` of best models based on the metric
     (named by ``metric_name``) provided by ``evaluator`` (i.e. ``evaluator.state.metrics[metric_name]``).
     Models with highest metric value will be retained. The logic of how to store objects is delegated to
@@ -593,12 +593,13 @@ def gen_save_best_models_by_val_score(
     if trainer is not None:
         global_step_transform = global_step_from_engine(trainer)
 
-    to_save = models
     if isinstance(models, nn.Module):
-        to_save = {"model": models}
+        to_save = {"model": models}  # type: Dict[str, nn.Module]
+    else:
+        to_save = models
 
     best_model_handler = Checkpoint(
-        cast(Dict[str, torch.nn.Module], to_save),
+        to_save,
         save_handler,
         filename_prefix="best",
         n_saved=n_saved,
@@ -621,7 +622,7 @@ def save_best_model_by_val_score(
     trainer: Optional[Engine] = None,
     tag: str = "val",
     **kwargs: Any
-):
+) -> Checkpoint:
     """Method adds a handler to ``evaluator`` to save on a disk ``n_saved`` of best models based on the metric
     (named by ``metric_name``) provided by ``evaluator`` (i.e. ``evaluator.state.metrics[metric_name]``).
     Models with highest metric value will be retained.
@@ -652,7 +653,9 @@ def save_best_model_by_val_score(
     )
 
 
-def add_early_stopping_by_val_score(patience: int, evaluator: Engine, trainer: Engine, metric_name: str):
+def add_early_stopping_by_val_score(
+    patience: int, evaluator: Engine, trainer: Engine, metric_name: str
+) -> EarlyStopping:
     """Method setups early stopping handler based on the score (named by `metric_name`) provided by `evaluator`.
     Metric value should increase in order to keep training and not early stop.
 
diff --git a/ignite/contrib/engines/tbptt.py b/ignite/contrib/engines/tbptt.py
index d6ef72b26e43..4dadf4b190fb 100644
--- a/ignite/contrib/engines/tbptt.py
+++ b/ignite/contrib/engines/tbptt.py
@@ -1,4 +1,5 @@
 # coding: utf-8
+import collections.abc as collections
 from typing import Callable, Mapping, Optional, Sequence, Union
 
 import torch
@@ -20,7 +21,9 @@ class Tbptt_Events(EventEnum):
     TIME_ITERATION_COMPLETED = "time_iteration_completed"
 
 
-def _detach_hidden(hidden: Union[torch.Tensor, Sequence, Mapping, str, bytes]):
+def _detach_hidden(
+    hidden: Union[torch.Tensor, Sequence, Mapping, str, bytes]
+) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:
     """Cut backpropagation graph.
 
     Auxillary function to cut the backpropagation graph by detaching the hidden
@@ -38,7 +41,7 @@ def create_supervised_tbptt_trainer(
     device: Optional[str] = None,
     non_blocking: bool = False,
     prepare_batch: Callable = _prepare_batch,
-):
+) -> Engine:
     """Create a trainer for truncated backprop through time supervised models.
 
     Training recurrent model on long sequences is computationally intensive as

From 81d04016a08d3de7fd8a514c8a4afdbf6ab7aeba Mon Sep 17 00:00:00 2001
From: Rostyslav Zatserkovnyi
Date: Thu, 29 Oct 2020 09:28:31 +0200
Subject: [PATCH 3/4] fix extra event too

---
 ignite/contrib/engines/common.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/ignite/contrib/engines/common.py b/ignite/contrib/engines/common.py
index 92fef52e4057..accb97cd25e3 100644
--- a/ignite/contrib/engines/common.py
+++ b/ignite/contrib/engines/common.py
@@ -216,7 +216,9 @@ def output_transform(x, index, name):
     if with_pbars:
         if with_pbar_on_iters:
             ProgressBar(persist=False).attach(
-                trainer, metric_names="all", event_name=cast(Events, Events.ITERATION_COMPLETED(every=log_every_iters))
+                trainer,
+                metric_names="all",
+                event_name=Events.ITERATION_COMPLETED(every=log_every_iters),  # type: ignore[arg-type]
             )
 
         ProgressBar(persist=True, bar_format="").attach(

From 794bce68677bf0970d8987291bd700cb41143b1a Mon Sep 17 00:00:00 2001
From: Rostyslav Zatserkovnyi
Date: Thu, 12 Nov 2020 00:20:06 +0200
Subject: [PATCH 4/4] Update to fix strict errors

---
 ignite/contrib/engines/common.py       | 22 +++++++++-------------
 ignite/contrib/engines/tbptt.py        |  2 +-
 ignite/contrib/handlers/tqdm_logger.py |  2 +-
 3 files changed, 11 insertions(+), 15 deletions(-)

diff --git a/ignite/contrib/engines/common.py b/ignite/contrib/engines/common.py
index accb97cd25e3..78228537c636 100644
--- a/ignite/contrib/engines/common.py
+++ b/ignite/contrib/engines/common.py
@@ -1,7 +1,7 @@
 import numbers
 import warnings
 from functools import partial
-from typing import Any, Callable, Dict, Iterable, Mapping, Optional, Sequence, Union, cast
+from typing import Any, Callable, Dict, Iterable, Mapping, Optional, Sequence, Tuple, Union, cast
 
 import torch
 import torch.nn as nn
@@ -184,9 +184,7 @@ def _setup_common_training_handlers(
         checkpoint_handler = Checkpoint(
             to_save, cast(Union[Callable, BaseSaveHandler], save_handler), filename_prefix="training", **kwargs
         )
-        trainer.add_event_handler(
-            Events.ITERATION_COMPLETED(every=save_every_iters), checkpoint_handler
-        )  # type: ignore[arg-type]
+        trainer.add_event_handler(Events.ITERATION_COMPLETED(every=save_every_iters), checkpoint_handler)
 
     if with_gpu_stats:
         GpuInfo().attach(
@@ -195,7 +193,7 @@ def _setup_common_training_handlers(
 
     if output_names is not None:
 
-        def output_transform(x, index, name):
+        def output_transform(x: Any, index: int, name: str) -> Any:
             if isinstance(x, Mapping):
                 return x[name]
             elif isinstance(x, Sequence):
@@ -216,9 +214,7 @@ def output_transform(x, index, name):
     if with_pbars:
         if with_pbar_on_iters:
             ProgressBar(persist=False).attach(
-                trainer,
-                metric_names="all",
-                event_name=Events.ITERATION_COMPLETED(every=log_every_iters),  # type: ignore[arg-type]
+                trainer, metric_names="all", event_name=Events.ITERATION_COMPLETED(every=log_every_iters)
             )
 
         ProgressBar(persist=True, bar_format="").attach(
@@ -266,18 +262,18 @@ def _setup_common_distrib_training_handlers(
         raise TypeError("Train sampler should be torch DistributedSampler and have `set_epoch` method")
 
     @trainer.on(Events.EPOCH_STARTED)
-    def distrib_set_epoch(engine):
-        train_sampler.set_epoch(engine.state.epoch - 1)
+    def distrib_set_epoch(engine: Engine) -> None:
+        cast(DistributedSampler, train_sampler).set_epoch(engine.state.epoch - 1)
 
 
-def empty_cuda_cache(_) -> None:
+def empty_cuda_cache(_: Engine) -> None:
     torch.cuda.empty_cache()
 
     import gc
 
     gc.collect()
 
 
-def setup_any_logging(logger, logger_module, trainer, optimizers, evaluators, log_every_iters) -> None:
+def setup_any_logging(logger, logger_module, trainer, optimizers, evaluators, log_every_iters) -> None:  # type: ignore
     raise DeprecationWarning(
         "ignite.contrib.engines.common.setup_any_logging is deprecated since 0.4.0. and will be remove in 0.6.0. "
         "Please use instead: setup_tb_logging, setup_visdom_logging or setup_mlflow_logging etc."
@@ -549,7 +545,7 @@ def setup_trains_logging(
 
 
 def get_default_score_fn(metric_name: str) -> Any:
-    def wrapper(engine: Engine):
+    def wrapper(engine: Engine) -> Any:
         score = engine.state.metrics[metric_name]
         return score
 
diff --git a/ignite/contrib/engines/tbptt.py b/ignite/contrib/engines/tbptt.py
index 4dadf4b190fb..d3efba4accbf 100644
--- a/ignite/contrib/engines/tbptt.py
+++ b/ignite/contrib/engines/tbptt.py
@@ -86,7 +86,7 @@ def create_supervised_tbptt_trainer(
 
     """
 
-    def _update(engine: Engine, batch: Sequence[torch.Tensor]):
+    def _update(engine: Engine, batch: Sequence[torch.Tensor]) -> float:
         loss_list = []
         hidden = None
 
diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py
index 9e7db89f666f..a0fe33f561e6 100644
--- a/ignite/contrib/handlers/tqdm_logger.py
+++ b/ignite/contrib/handlers/tqdm_logger.py
@@ -159,7 +159,7 @@ def attach(
         engine: Engine,
         metric_names: Optional[str] = None,
         output_transform: Optional[Callable] = None,
-        event_name: Events = Events.ITERATION_COMPLETED,
+        event_name: Union[CallableEventWithFilter, Events] = Events.ITERATION_COMPLETED,
        closing_event_name: Events = Events.EPOCH_COMPLETED,
     ):
         """
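
Note (illustrative, not part of the patch series): the recurring pattern across these patches is to keep the runtime `is not None`/`isinstance` checks unchanged and use `typing.cast` -- a no-op at runtime -- only to narrow the type that mypy sees, for example where a lambda closes over an `Optional` value; this is what allows dropping the blanket `[mypy-ignite.contrib.engines.*] ignore_errors = True` override from mypy.ini in the first patch. A minimal standalone sketch of that pattern, assuming only that torch is installed (the helper name below is hypothetical, not an ignite API):

from typing import Callable, List, Optional, cast

from torch import nn
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR, _LRScheduler


def register_scheduler_step(
    handlers: List[Callable[[object], None]], lr_scheduler: Optional[_LRScheduler] = None
) -> None:
    # The runtime check guarantees lr_scheduler is not None here, but the lambda's
    # closure still carries the Optional type; cast() narrows it for the type checker
    # without changing runtime behaviour.
    if lr_scheduler is not None:
        handlers.append(lambda engine: cast(_LRScheduler, lr_scheduler).step())


model = nn.Linear(2, 2)
scheduler = StepLR(SGD(model.parameters(), lr=0.1), step_size=1)
handlers: List[Callable[[object], None]] = []
register_scheduler_step(handlers, scheduler)
handlers[0](None)  # stands in for an engine firing ITERATION_COMPLETED

Where a cast would be too coarse, the patches instead use a targeted `# type: ignore[arg-type]` or widen the parameter type (as in the final tqdm_logger.py change), which keeps the rest of the module under full checking.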