diff --git a/ignite/contrib/handlers/base_logger.py b/ignite/contrib/handlers/base_logger.py
index 6fd9949b274d..407ced050b4c 100644
--- a/ignite/contrib/handlers/base_logger.py
+++ b/ignite/contrib/handlers/base_logger.py
@@ -107,7 +107,7 @@ class BaseWeightsScalarHandler(BaseHandler):
Helper handler to log model's weights as scalars.
"""
- def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None) -> None:
+ def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None):
if not isinstance(model, torch.nn.Module):
raise TypeError(f"Argument model should be of type torch.nn.Module, but given {type(model)}")
@@ -152,14 +152,14 @@ def attach(
"""Attach the logger to the engine and execute `log_handler` function at `event_name` events.
Args:
- engine (Engine): engine object.
- log_handler (callable): a logging handler to execute
+ engine: engine object.
+ log_handler: a logging handler to execute
event_name: event to attach the logging handler to. Valid events are from
- :class:`~ignite.engine.events.Events` or class:`~ignite.engine.events.EventsList` or any `event_name`
+ :class:`~ignite.engine.events.Events` or :class:`~ignite.engine.events.EventsList` or any `event_name`
added by :meth:`~ignite.engine.engine.Engine.register_events`.
Returns:
- :class:`~ignite.engine.RemovableEventHandle`, which can be used to remove the handler.
+ :class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler.
"""
if isinstance(event_name, EventsList):
for name in event_name:
@@ -180,15 +180,15 @@ def attach_output_handler(self, engine: Engine, event_name: Any, *args: Any, **k
"""Shortcut method to attach `OutputHandler` to the logger.
Args:
- engine (Engine): engine object.
+ engine: engine object.
event_name: event to attach the logging handler to. Valid events are from
:class:`~ignite.engine.events.Events` or any `event_name` added by
:meth:`~ignite.engine.engine.Engine.register_events`.
- *args: args to initialize `OutputHandler`
- **kwargs: kwargs to initialize `OutputHandler`
+ args: args to initialize `OutputHandler`
+ kwargs: kwargs to initialize `OutputHandler`
Returns:
- :class:`~ignite.engine.RemovableEventHandle`, which can be used to remove the handler.
+ :class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler.
"""
return self.attach(engine, self._create_output_handler(*args, **kwargs), event_name=event_name)
@@ -198,15 +198,15 @@ def attach_opt_params_handler(
"""Shortcut method to attach `OptimizerParamsHandler` to the logger.
Args:
- engine (Engine): engine object.
+ engine: engine object.
event_name: event to attach the logging handler to. Valid events are from
:class:`~ignite.engine.events.Events` or any `event_name` added by
:meth:`~ignite.engine.engine.Engine.register_events`.
- *args: args to initialize `OptimizerParamsHandler`
- **kwargs: kwargs to initialize `OptimizerParamsHandler`
+ args: args to initialize `OptimizerParamsHandler`
+ kwargs: kwargs to initialize `OptimizerParamsHandler`
Returns:
- :class:`~ignite.engine.RemovableEventHandle`, which can be used to remove the handler.
+ :class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler.
.. versionchanged:: 0.4.3
Added missing return statement.
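The attach/remove contract documented above can be exercised roughly as in the sketch below. This is a minimal, hedged example assuming a concrete logger such as `TensorboardLogger` and a toy `Engine`; the handle returned by `attach` is the `RemovableEventHandle` the docstring refers to.

```python
from ignite.engine import Engine, Events
from ignite.contrib.handlers.tensorboard_logger import TensorboardLogger, OutputHandler

# toy training step that just returns a loss-like value
trainer = Engine(lambda engine, batch: {"loss": 0.0})

tb_logger = TensorboardLogger(log_dir="tb-logs")

# attach() returns a RemovableEventHandle, so logging can be detached later
handle = tb_logger.attach(
    trainer,
    log_handler=OutputHandler(tag="training", output_transform=lambda out: out["loss"]),
    event_name=Events.ITERATION_COMPLETED,
)

trainer.run([0, 1, 2], max_epochs=1)

handle.remove()   # stop logging without touching any other handlers
tb_logger.close()
```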
diff --git a/ignite/contrib/handlers/clearml_logger.py b/ignite/contrib/handlers/clearml_logger.py
index e4d04f5f3dad..f67dec5ba092 100644
--- a/ignite/contrib/handlers/clearml_logger.py
+++ b/ignite/contrib/handlers/clearml_logger.py
@@ -49,11 +49,11 @@ class ClearMLLogger(BaseLogger):
clearml-init
Args:
- project_name (str): The name of the project in which the experiment will be created. If the project
+ project_name: The name of the project in which the experiment will be created. If the project
does not exist, it is created. If ``project_name`` is ``None``, the repository name is used. (Optional)
- task_name (str): The name of Task (experiment). If ``task_name`` is ``None``, the Python experiment
+ task_name: The name of Task (experiment). If ``task_name`` is ``None``, the Python experiment
script's file name is used. (Optional)
- task_type (str): Optional. The task type. Valid values are:
+ task_type: Optional. The task type. Valid values are:
- ``TaskTypes.training`` (Default)
- ``TaskTypes.train``
- ``TaskTypes.testing``
@@ -119,7 +119,7 @@ class ClearMLLogger(BaseLogger):
"""
- def __init__(self, *_: Any, **kwargs: Any) -> None:
+ def __init__(self, *_: Any, **kwargs: Any):
try:
from clearml import Task
from clearml.binding.frameworks.tensorflow_bind import WeightsGradientHistHelper
@@ -270,14 +270,14 @@ def global_step_transform(*args, **kwargs):
)
Args:
- tag (str): common title for all produced plots. For example, "training"
- metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available
+ tag: common title for all produced plots. For example, "training"
+ metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
- output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.
+ output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot
with corresponding keys.
- global_step_transform (callable, optional): global step transform function to output a desired global step.
+ global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
@@ -299,7 +299,7 @@ def __init__(
metric_names: Optional[List[str]] = None,
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable] = None,
- ) -> None:
+ ):
super(OutputHandler, self).__init__(tag, metric_names, output_transform, global_step_transform)
def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None:
@@ -359,13 +359,13 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler):
)
Args:
- optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups``
+ optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
- param_name (str): parameter name
- tag (str, optional): common title for all produced plots. For example, "generator"
+ param_name: parameter name
+ tag: common title for all produced plots. For example, "generator"
"""
- def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None) -> None:
+ def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None:
@@ -410,13 +410,13 @@ class WeightsScalarHandler(BaseWeightsScalarHandler):
)
Args:
- model (torch.nn.Module): model to log weights
- reduction (callable): function to reduce parameters into scalar
- tag (str, optional): common title for all produced plots. For example, "generator"
+ model: model to log weights
+ reduction: function to reduce parameters into scalar
+ tag: common title for all produced plots. For example, "generator"
"""
- def __init__(self, model: Module, reduction: Callable = torch.norm, tag: Optional[str] = None) -> None:
+ def __init__(self, model: Module, reduction: Callable = torch.norm, tag: Optional[str] = None):
super(WeightsScalarHandler, self).__init__(model, reduction, tag=tag)
def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None:
@@ -463,12 +463,12 @@ class WeightsHistHandler(BaseWeightsHistHandler):
)
Args:
- model (torch.nn.Module): model to log weights
- tag (str, optional): common title for all produced plots. For example, 'generator'
+ model: model to log weights
+ tag: common title for all produced plots. For example, 'generator'
"""
- def __init__(self, model: Module, tag: Optional[str] = None) -> None:
+ def __init__(self, model: Module, tag: Optional[str] = None):
super(WeightsHistHandler, self).__init__(model, tag=tag)
def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None:
@@ -517,13 +517,13 @@ class GradsScalarHandler(BaseWeightsScalarHandler):
)
Args:
- model (torch.nn.Module): model to log weights
- reduction (callable): function to reduce parameters into scalar
- tag (str, optional): common title for all produced plots. For example, "generator"
+ model: model to log weights
+ reduction: function to reduce parameters into scalar
+ tag: common title for all produced plots. For example, "generator"
"""
- def __init__(self, model: Module, reduction: Callable = torch.norm, tag: Optional[str] = None) -> None:
+ def __init__(self, model: Module, reduction: Callable = torch.norm, tag: Optional[str] = None):
super(GradsScalarHandler, self).__init__(model, reduction, tag=tag)
def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None:
@@ -569,12 +569,12 @@ class GradsHistHandler(BaseWeightsHistHandler):
)
Args:
- model (torch.nn.Module): model to log weights
- tag (str, optional): common title for all produced plots. For example, 'generator'
+ model: model to log weights
+ tag: common title for all produced plots. For example, 'generator'
"""
- def __init__(self, model: Module, tag: Optional[str] = None) -> None:
+ def __init__(self, model: Module, tag: Optional[str] = None):
super(GradsHistHandler, self).__init__(model, tag=tag)
def __call__(self, engine: Engine, logger: ClearMLLogger, event_name: Union[str, Events]) -> None:
@@ -602,12 +602,12 @@ class ClearMLSaver(DiskSaver):
Handler that saves input checkpoint as ClearML artifacts
Args:
- logger (ClearMLLogger, optional): An instance of :class:`~ignite.contrib.handlers.clearml_logger.ClearMLLogger`,
+ logger: An instance of :class:`~ignite.contrib.handlers.clearml_logger.ClearMLLogger`,
ensuring a valid ClearML ``Task`` has been initialized. If not provided, and a ClearML Task
has not been manually initialized, a runtime error will be raised.
- output_uri (str, optional): The default location for output models and other artifacts uploaded by ClearML. For
+ output_uri: The default location for output models and other artifacts uploaded by ClearML. For
more information, see ``clearml.Task.init``.
- dirname (str, optional): Directory path where the checkpoint will be saved. If not provided, a temporary
+ dirname: Directory path where the checkpoint will be saved. If not provided, a temporary
directory will be created.
Examples:
@@ -645,7 +645,7 @@ def __init__(
dirname: Optional[str] = None,
*args: Any,
**kwargs: Any,
- ) -> None:
+ ):
self._setup_check_clearml(logger, output_uri)
@@ -793,7 +793,7 @@ def get_local_copy(self, filename: str) -> Optional[str]:
In distributed configuration this method should be called on rank 0 process.
Args:
- filename (str): artifact name.
+ filename: artifact name.
Returns:
a local path to a downloaded copy of the artifact
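The `ClearMLSaver` described above plugs into ignite's `Checkpoint` handler. A rough sketch, assuming `clearml` is installed and configured via `clearml-init`; the model and training loop below are placeholders:

```python
import torch.nn as nn
from ignite.engine import Engine, Events
from ignite.handlers import Checkpoint
from ignite.contrib.handlers.clearml_logger import ClearMLLogger, ClearMLSaver

# creating the logger also initializes a ClearML Task, which ClearMLSaver requires
clearml_logger = ClearMLLogger(project_name="examples", task_name="ignite-checkpoints")

model = nn.Linear(10, 2)                      # placeholder model
trainer = Engine(lambda engine, batch: None)  # placeholder training step

# every checkpoint produced by Checkpoint is uploaded as a ClearML artifact
checkpoint = Checkpoint({"model": model}, ClearMLSaver(), n_saved=2)
trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpoint)
```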
diff --git a/ignite/contrib/handlers/lr_finder.py b/ignite/contrib/handlers/lr_finder.py
index bc4401f821c3..b319ffaabd49 100644
--- a/ignite/contrib/handlers/lr_finder.py
+++ b/ignite/contrib/handlers/lr_finder.py
@@ -196,11 +196,11 @@ def plot(self, skip_start: int = 10, skip_end: int = 5, log_lr: bool = True) ->
pip install matplotlib
Args:
- skip_start (int, optional): number of batches to trim from the start.
+ skip_start: number of batches to trim from the start.
Default: 10.
- skip_end (int, optional): number of batches to trim from the start.
+            skip_end: number of batches to trim from the end.
Default: 5.
- log_lr (bool, optional): True to plot the learning rate in a logarithmic
+ log_lr: True to plot the learning rate in a logarithmic
scale; otherwise, plotted in a linear scale. Default: True.
"""
try:
@@ -273,27 +273,27 @@ def attach(
trainer_with_lr_finder.run(dataloader)`
Args:
- trainer (Engine): lr_finder is attached to this trainer. Please, keep in mind that all attached handlers
+ trainer: lr_finder is attached to this trainer. Please, keep in mind that all attached handlers
will be executed.
- to_save (Mapping): dictionary with optimizer and other objects that needs to be restored after running
+ to_save: dictionary with optimizer and other objects that needs to be restored after running
the LR finder. For example, `to_save={'optimizer': optimizer, 'model': model}`. All objects should
implement `state_dict` and `load_state_dict` methods.
- output_transform (callable, optional): function that transforms the trainer's `state.output` after each
+ output_transform: function that transforms the trainer's `state.output` after each
iteration. It must return the loss of that iteration.
- num_iter (int, optional): number of iterations for lr schedule between base lr and end_lr. Default, it will
+ num_iter: number of iterations for lr schedule between base lr and end_lr. Default, it will
run for `trainer.state.epoch_length * trainer.state.max_epochs`.
- end_lr (float, optional): upper bound for lr search. Default, 10.0.
- step_mode (str, optional): "exp" or "linear", which way should the lr be increased from optimizer's initial
+ end_lr: upper bound for lr search. Default, 10.0.
+ step_mode: "exp" or "linear", which way should the lr be increased from optimizer's initial
lr to `end_lr`. Default, "exp".
- smooth_f (float, optional): loss smoothing factor in range `[0, 1)`. Default, 0.05
- diverge_th (float, optional): Used for stopping the search when `current loss > diverge_th * best_loss`.
+ smooth_f: loss smoothing factor in range `[0, 1)`. Default, 0.05
+ diverge_th: Used for stopping the search when `current loss > diverge_th * best_loss`.
Default, 5.0.
+ Returns:
+ trainer_with_lr_finder (trainer used for finding the lr)
+
Note:
lr_finder cannot be attached to more than one trainer at a time.
-
- Returns:
- trainer_with_lr_finder: trainer used for finding the lr
"""
if not isinstance(to_save, Mapping):
raise TypeError(f"Argument to_save should be a mapping, but given {type(to_save)}")
@@ -363,16 +363,16 @@ class _ExponentialLR(_LRScheduler):
iterations.
Args:
- optimizer (torch.optim.Optimizer): wrapped optimizer.
- end_lr (float, optional): the initial learning rate which is the lower
+ optimizer: wrapped optimizer.
+ end_lr: the initial learning rate which is the lower
boundary of the test. Default: 10.
- num_iter (int, optional): the number of iterations over which the test
+ num_iter: the number of iterations over which the test
occurs. Default: 100.
- last_epoch (int): the index of last epoch. Default: -1.
+ last_epoch: the index of last epoch. Default: -1.
"""
- def __init__(self, optimizer: Optimizer, end_lr: float, num_iter: int, last_epoch: int = -1) -> None:
+ def __init__(self, optimizer: Optimizer, end_lr: float, num_iter: int, last_epoch: int = -1):
self.end_lr = end_lr
self.num_iter = num_iter
super(_ExponentialLR, self).__init__(optimizer, last_epoch)
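Putting the `attach`/`plot` docstrings above together, a typical run of the LR finder exported from this module (`FastaiLRFinder` in current ignite) looks roughly like this; `model`, `optimizer`, `trainer` and `dataloader` are assumed to exist already:

```python
from ignite.contrib.handlers import FastaiLRFinder

lr_finder = FastaiLRFinder()
to_save = {"model": model, "optimizer": optimizer}

# attach() patches the trainer temporarily; everything in to_save is restored on exit
with lr_finder.attach(trainer, to_save=to_save, end_lr=10.0, step_mode="exp") as trainer_with_lr_finder:
    trainer_with_lr_finder.run(dataloader)

print(lr_finder.lr_suggestion())           # lr at the steepest drop of the loss
lr_finder.plot(skip_start=10, skip_end=5)  # requires matplotlib
```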
diff --git a/ignite/contrib/handlers/mlflow_logger.py b/ignite/contrib/handlers/mlflow_logger.py
index d8a7819d8e6a..3b7602bee2a1 100644
--- a/ignite/contrib/handlers/mlflow_logger.py
+++ b/ignite/contrib/handlers/mlflow_logger.py
@@ -24,7 +24,7 @@ class MLflowLogger(BaseLogger):
pip install mlflow
Args:
- tracking_uri (str): MLflow tracking uri. See MLflow docs for more details
+ tracking_uri: MLflow tracking uri. See MLflow docs for more details
Examples:
@@ -86,7 +86,7 @@ class MLflowLogger(BaseLogger):
)
"""
- def __init__(self, tracking_uri: Optional[str] = None) -> None:
+ def __init__(self, tracking_uri: Optional[str] = None):
try:
import mlflow
except ImportError:
@@ -182,14 +182,14 @@ def global_step_transform(*args, **kwargs):
)
Args:
- tag (str): common title for all produced plots. For example, 'training'
- metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available
+ tag: common title for all produced plots. For example, 'training'
+ metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
- output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.
+ output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{'loss': loss1, 'another_loss': loss2}` to label the plot
with corresponding keys.
- global_step_transform (callable, optional): global step transform function to output a desired global step.
+ global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
@@ -284,13 +284,13 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler):
)
Args:
- optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups``
+ optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
- param_name (str): parameter name
- tag (str, optional): common title for all produced plots. For example, 'generator'
+ param_name: parameter name
+ tag: common title for all produced plots. For example, 'generator'
"""
- def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None) -> None:
+ def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
def __call__(self, engine: Engine, logger: MLflowLogger, event_name: Union[str, Events]) -> None:
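As a small illustration of `OptimizerParamsHandler`, the current learning rate can be pushed to MLflow on every iteration. A hedged sketch, assuming `mlflow` is installed and `trainer`/`optimizer` already exist:

```python
from ignite.engine import Events
from ignite.contrib.handlers.mlflow_logger import MLflowLogger, OptimizerParamsHandler

mlflow_logger = MLflowLogger()  # default tracking URI; pass tracking_uri=... to override

# log optimizer.param_groups[0]["lr"] under the "training" tag at each iteration start
mlflow_logger.attach(
    trainer,
    log_handler=OptimizerParamsHandler(optimizer, param_name="lr", tag="training"),
    event_name=Events.ITERATION_STARTED,
)
```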
diff --git a/ignite/contrib/handlers/neptune_logger.py b/ignite/contrib/handlers/neptune_logger.py
index 314873d97341..f5fce066e9a6 100644
--- a/ignite/contrib/handlers/neptune_logger.py
+++ b/ignite/contrib/handlers/neptune_logger.py
@@ -40,30 +40,30 @@ class NeptuneLogger(BaseLogger):
pip install neptune-client
Args:
- api_token (str | None): Required in online mode. Neputne API token, found on https://neptune.ai.
+        api_token: Required in online mode. Neptune API token, found on https://neptune.ai.
Read how to get your API key
https://docs.neptune.ai/python-api/tutorials/get-started.html#copy-api-token.
- project_name (str): Required in online mode. Qualified name of a project in a form of
+ project_name: Required in online mode. Qualified name of a project in a form of
"namespace/project_name" for example "tom/minst-classification".
If None, the value of NEPTUNE_PROJECT environment variable will be taken.
You need to create the project in https://neptune.ai first.
- offline_mode (bool): Optional default False. If offline_mode=True no logs will be send to neptune.
+        offline_mode: Optional default False. If offline_mode=True no logs will be sent to neptune.
Usually used for debug purposes.
- experiment_name (str, optional): Optional. Editable name of the experiment.
+ experiment_name: Optional. Editable name of the experiment.
Name is displayed in the experiment’s Details (Metadata section) and in experiments view as a column.
- upload_source_files (list, optional): Optional. List of source files to be uploaded.
+ upload_source_files: Optional. List of source files to be uploaded.
Must be list of str or single str. Uploaded sources are displayed in the experiment’s Source code tab.
If None is passed, Python file from which experiment was created will be uploaded.
Pass empty list (`[]`) to upload no files. Unix style pathname pattern expansion is supported.
For example, you can pass `*.py` to upload all python source files from the current directory.
For recursion lookup use `**/*.py` (for Python 3.5 and later). For more information see glob library.
- params (dict, optional): Optional. Parameters of the experiment. After experiment creation params are read-only.
+ params: Optional. Parameters of the experiment. After experiment creation params are read-only.
Parameters are displayed in the experiment’s Parameters section and each key-value pair can be
viewed in experiments view as a column.
- properties (dict, optional): Optional default is `{}`. Properties of the experiment.
+ properties: Optional default is `{}`. Properties of the experiment.
They are editable after experiment is created. Properties are displayed in the experiment’s Details and
each key-value pair can be viewed in experiments view as a column.
- tags (list, optional): Optional default `[]`. Must be list of str. Tags of the experiment.
+ tags: Optional default `[]`. Must be list of str. Tags of the experiment.
Tags are displayed in the experiment’s Details and can be viewed in experiments view as a column.
Examples:
@@ -293,14 +293,14 @@ def global_step_transform(*args, **kwargs):
)
Args:
- tag (str): common title for all produced plots. For example, "training"
- metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available
+ tag: common title for all produced plots. For example, "training"
+ metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
- output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.
+ output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot
with corresponding keys.
- global_step_transform (callable, optional): global step transform function to output a desired global step.
+ global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
@@ -323,7 +323,7 @@ def __init__(
metric_names: Optional[Union[str, List[str]]] = None,
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable] = None,
- ) -> None:
+ ):
super(OutputHandler, self).__init__(tag, metric_names, output_transform, global_step_transform)
def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str, Events]) -> None:
@@ -385,13 +385,13 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler):
)
Args:
- optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups``
+ optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
- param_name (str): parameter name
- tag (str, optional): common title for all produced plots. For example, "generator"
+ param_name: parameter name
+ tag: common title for all produced plots. For example, "generator"
"""
- def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None) -> None:
+ def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str, Events]) -> None:
@@ -439,13 +439,13 @@ class WeightsScalarHandler(BaseWeightsScalarHandler):
)
Args:
- model (torch.nn.Module): model to log weights
- reduction (callable): function to reduce parameters into scalar
- tag (str, optional): common title for all produced plots. For example, "generator"
+ model: model to log weights
+ reduction: function to reduce parameters into scalar
+ tag: common title for all produced plots. For example, "generator"
"""
- def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None) -> None:
+ def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None):
super(WeightsScalarHandler, self).__init__(model, reduction, tag=tag)
def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str, Events]) -> None:
@@ -495,13 +495,13 @@ class GradsScalarHandler(BaseWeightsScalarHandler):
)
Args:
- model (torch.nn.Module): model to log weights
- reduction (callable): function to reduce parameters into scalar
- tag (str, optional): common title for all produced plots. For example, "generator"
+ model: model to log weights
+ reduction: function to reduce parameters into scalar
+ tag: common title for all produced plots. For example, "generator"
"""
- def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None) -> None:
+ def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None):
super(GradsScalarHandler, self).__init__(model, reduction, tag=tag)
def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str, Events]) -> None:
@@ -524,7 +524,7 @@ class NeptuneSaver(BaseSaveHandler):
"""Handler that saves input checkpoint to the Neptune server.
Args:
- neptune_logger (ignite.contrib.handlers.neptune_logger.NeptuneLogger): an instance of
+ neptune_logger: an instance of
NeptuneLogger class.
Examples:
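A compact way to try the handlers above without a Neptune account is offline mode; the sketch below assumes `model` and `trainer` exist and logs gradient norms every iteration:

```python
import torch
from ignite.engine import Events
from ignite.contrib.handlers.neptune_logger import NeptuneLogger, GradsScalarHandler

# offline_mode=True keeps all logs local, which is convenient for a dry run
npt_logger = NeptuneLogger(
    offline_mode=True,
    project_name="my-workspace/my-project",  # hypothetical project name
    experiment_name="grad-norm-demo",
)

# log the norm of each parameter's gradient after every iteration
npt_logger.attach(
    trainer,
    log_handler=GradsScalarHandler(model, reduction=torch.norm),
    event_name=Events.ITERATION_COMPLETED,
)
```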
diff --git a/ignite/contrib/handlers/param_scheduler.py b/ignite/contrib/handlers/param_scheduler.py
index 60f6d7fe0b63..61f8ba36ffcb 100644
--- a/ignite/contrib/handlers/param_scheduler.py
+++ b/ignite/contrib/handlers/param_scheduler.py
@@ -20,12 +20,12 @@ class ParamScheduler(metaclass=ABCMeta):
training.
Args:
- optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups``
+ optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
- param_name (str): name of optimizer's parameter to update.
- save_history (bool, optional): whether to log the parameter values to
+ param_name: name of optimizer's parameter to update.
+ save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
- param_group_index (int, optional): optimizer's parameters group to use
+ param_group_index: optimizer's parameters group to use
Note:
Parameter scheduler works independently of the internal state of the attached optimizer.
@@ -40,7 +40,7 @@ def __init__(
param_name: str,
save_history: bool = False,
param_group_index: Optional[int] = None,
- ) -> None:
+ ):
if not (
isinstance(optimizer, Optimizer)
@@ -120,7 +120,7 @@ def load_state_dict(self, state_dict: Mapping) -> None:
"""Copies parameters from :attr:`state_dict` into this ParamScheduler.
Args:
- state_dict (dict): a dict containing parameters.
+ state_dict: a dict containing parameters.
"""
if not isinstance(state_dict, Mapping):
raise TypeError(f"Argument state_dict should be a dictionary, but given {type(state_dict)}")
@@ -151,11 +151,11 @@ def simulate_values(cls, num_events: int, **scheduler_kwargs: Any) -> List[List[
"""Method to simulate scheduled values during `num_events` events.
Args:
- num_events (int): number of events during the simulation.
- **scheduler_kwargs : parameter scheduler configuration kwargs.
+ num_events: number of events during the simulation.
+ scheduler_kwargs: parameter scheduler configuration kwargs.
Returns:
- list of pairs: [event_index, value]
+ event_index, value
Examples:
@@ -193,8 +193,8 @@ def plot_values(cls, num_events: int, **scheduler_kwargs: Mapping) -> Any:
pip install matplotlib
Args:
- num_events (int): number of events during the simulation.
- **scheduler_kwargs : parameter scheduler configuration kwargs.
+ num_events: number of events during the simulation.
+ scheduler_kwargs: parameter scheduler configuration kwargs.
Returns:
matplotlib.lines.Line2D
@@ -230,21 +230,21 @@ class CyclicalScheduler(ParamScheduler):
cycle of some size.
Args:
- optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups``
+ optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
- param_name (str): name of optimizer's parameter to update.
- start_value (float): value at start of cycle.
- end_value (float): value at the middle of the cycle.
- cycle_size (int): length of cycle, value should be larger than 1.
- cycle_mult (float, optional): ratio by which to change the cycle_size.
+ param_name: name of optimizer's parameter to update.
+ start_value: value at start of cycle.
+ end_value: value at the middle of the cycle.
+ cycle_size: length of cycle, value should be larger than 1.
+ cycle_mult: ratio by which to change the cycle_size.
at the end of each cycle (default=1.0).
- start_value_mult (float, optional): ratio by which to change the start value at the
+ start_value_mult: ratio by which to change the start value at the
end of each cycle (default=1.0).
- end_value_mult (float, optional): ratio by which to change the end value at the
+ end_value_mult: ratio by which to change the end value at the
end of each cycle (default=1.0).
- save_history (bool, optional): whether to log the parameter values to
+ save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
- param_group_index (int, optional): optimizer's parameters group to use.
+ param_group_index: optimizer's parameters group to use.
Note:
If the scheduler is bound to an 'ITERATION_*' event, 'cycle_size' should
@@ -304,21 +304,21 @@ class LinearCyclicalScheduler(CyclicalScheduler):
adjusts it back to 'start_value' for a half-cycle.
Args:
- optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups``
+ optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
- param_name (str): name of optimizer's parameter to update.
- start_value (float): value at start of cycle.
- end_value (float): value at the middle of the cycle.
- cycle_size (int): length of cycle.
- cycle_mult (float, optional): ratio by which to change the cycle_size
+ param_name: name of optimizer's parameter to update.
+ start_value: value at start of cycle.
+ end_value: value at the middle of the cycle.
+ cycle_size: length of cycle.
+ cycle_mult: ratio by which to change the cycle_size
at the end of each cycle (default=1).
- start_value_mult (float, optional): ratio by which to change the start value at the
+ start_value_mult: ratio by which to change the start value at the
end of each cycle (default=1.0).
- end_value_mult (float, optional): ratio by which to change the end value at the
+ end_value_mult: ratio by which to change the end value at the
end of each cycle (default=1.0).
- save_history (bool, optional): whether to log the parameter values to
+ save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
- param_group_index (int, optional): optimizer's parameters group to use.
+ param_group_index: optimizer's parameters group to use.
Note:
If the scheduler is bound to an 'ITERATION_*' event, 'cycle_size' should
@@ -350,21 +350,21 @@ class CosineAnnealingScheduler(CyclicalScheduler):
wave (as suggested in [Smith17]_).
Args:
- optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups``
+ optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
- param_name (str): name of optimizer's parameter to update.
- start_value (float): value at start of cycle.
- end_value (float): value at the end of the cycle.
- cycle_size (int): length of cycle.
- cycle_mult (float, optional): ratio by which to change the cycle_size
+ param_name: name of optimizer's parameter to update.
+ start_value: value at start of cycle.
+ end_value: value at the end of the cycle.
+ cycle_size: length of cycle.
+ cycle_mult: ratio by which to change the cycle_size
at the end of each cycle (default=1).
- start_value_mult (float, optional): ratio by which to change the start value at the
+ start_value_mult: ratio by which to change the start value at the
end of each cycle (default=1.0).
- end_value_mult (float, optional): ratio by which to change the end value at the
+ end_value_mult: ratio by which to change the end value at the
end of each cycle (default=1.0).
- save_history (bool, optional): whether to log the parameter values to
+ save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
- param_group_index (int, optional): optimizer's parameters group to use.
+ param_group_index: optimizer's parameters group to use.
Note:
If the scheduler is bound to an 'ITERATION_*' event, 'cycle_size' should
@@ -418,9 +418,9 @@ class ConcatScheduler(ParamScheduler):
scheduler is defined by `durations` list of integers.
Args:
- schedulers (list of ParamScheduler): list of parameter schedulers.
- durations (list of int): list of number of events that lasts a parameter scheduler from schedulers.
- save_history (bool, optional): whether to log the parameter values to
+ schedulers: list of parameter schedulers.
+ durations: list of number of events that lasts a parameter scheduler from schedulers.
+ save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
Examples:
@@ -444,7 +444,7 @@ class ConcatScheduler(ParamScheduler):
"""
- def __init__(self, schedulers: List[ParamScheduler], durations: List[int], save_history: bool = False) -> None:
+ def __init__(self, schedulers: List[ParamScheduler], durations: List[int], save_history: bool = False):
if not isinstance(schedulers, Sequence):
raise TypeError(f"Argument schedulers should be a sequence, but given {schedulers}")
@@ -520,7 +520,7 @@ def load_state_dict(self, state_dict: Mapping) -> None:
"""Copies parameters from :attr:`state_dict` into this ConcatScheduler.
Args:
- state_dict (dict): a dict containing parameters.
+ state_dict: a dict containing parameters.
"""
if not isinstance(state_dict, Mapping):
raise TypeError(f"Argument state_dict should be a dictionary, but given {type(state_dict)}")
@@ -579,15 +579,14 @@ def simulate_values( # type: ignore[override]
schedulers: List[ParamScheduler],
durations: List[int],
param_names: Optional[Union[List[str], Tuple[str]]] = None,
- **kwargs: Any,
) -> List[List[int]]:
"""Method to simulate scheduled values during num_events events.
Args:
- num_events (int): number of events during the simulation.
- schedulers (list of ParamScheduler): list of parameter schedulers.
- durations (list of int): list of number of events that lasts a parameter scheduler from schedulers.
- param_names (list or tuple of str, optional): parameter name or list of parameter names to simulate values.
+ num_events: number of events during the simulation.
+ schedulers: list of parameter schedulers.
+ durations: list of number of events that lasts a parameter scheduler from schedulers.
+ param_names: parameter name or list of parameter names to simulate values.
By default, the first scheduler's parameter name is taken.
Returns:
@@ -626,9 +625,7 @@ def simulate_values( # type: ignore[override]
s.save_history = False
output = []
- scheduler = cls( # type: ignore[call-arg]
- schedulers=schedulers, save_history=False, durations=durations, **kwargs
- )
+ scheduler = cls(schedulers=schedulers, save_history=False, durations=durations)
if param_names is None:
param_names = [scheduler.param_name]
for i in range(num_events):
@@ -651,8 +648,8 @@ class LRScheduler(ParamScheduler):
"""A wrapper class to call `torch.optim.lr_scheduler` objects as `ignite` handlers.
Args:
- lr_scheduler (subclass of `torch.optim.lr_scheduler._LRScheduler`): lr_scheduler object to wrap.
- save_history (bool, optional): whether to log the parameter values to
+ lr_scheduler: lr_scheduler object to wrap.
+ save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
.. code-block:: python
@@ -671,7 +668,7 @@ class LRScheduler(ParamScheduler):
trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler)
"""
- def __init__(self, lr_scheduler: _LRScheduler, save_history: bool = False) -> None:
+ def __init__(self, lr_scheduler: _LRScheduler, save_history: bool = False):
if not isinstance(lr_scheduler, _LRScheduler):
raise TypeError(
@@ -710,11 +707,11 @@ def simulate_values( # type: ignore[override]
"""Method to simulate scheduled values during num_events events.
Args:
- num_events (int): number of events during the simulation.
- lr_scheduler (subclass of `torch.optim.lr_scheduler._LRScheduler`): lr_scheduler object to wrap.
+ num_events: number of events during the simulation.
+ lr_scheduler: lr_scheduler object to wrap.
Returns:
- list of pairs: [event_index, value]
+ event_index, value
"""
@@ -761,20 +758,20 @@ def create_lr_scheduler_with_warmup(
Helper method to create a learning rate scheduler with a linear warm-up.
Args:
- lr_scheduler (ParamScheduler or subclass of `torch.optim.lr_scheduler._LRScheduler`): learning rate scheduler
+ lr_scheduler: learning rate scheduler
after the warm-up.
- warmup_start_value (float): learning rate start value of the warm-up phase.
- warmup_duration (int): warm-up phase duration, number of events.
- warmup_end_value (float, optional): learning rate end value of the warm-up phase, (default=None). If None,
+ warmup_start_value: learning rate start value of the warm-up phase.
+ warmup_duration: warm-up phase duration, number of events.
+ warmup_end_value: learning rate end value of the warm-up phase, (default=None). If None,
warmup_end_value is set to optimizer initial lr.
- save_history (bool, optional): whether to log the parameter values to
+ save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
- output_simulated_values (list, optional): optional output of simulated learning rate values.
+ output_simulated_values: optional output of simulated learning rate values.
If output_simulated_values is a list of None, e.g. `[None] * 100`, after the execution it will be filled
by 100 simulated learning rate values.
Returns:
- ConcatScheduler: learning rate scheduler with linear warm-up.
+ ConcatScheduler
Note:
If the first learning rate value provided by `lr_scheduler` is different from `warmup_end_value`, an additional
@@ -876,18 +873,14 @@ class PiecewiseLinear(ParamScheduler):
Piecewise linear parameter scheduler
Args:
- optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups``
+ optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
- param_name (str): name of optimizer's parameter to update.
- milestones_values (list of tuples (int, float)): list of tuples (event index, parameter value)
+ param_name: name of optimizer's parameter to update.
+ milestones_values: list of tuples (event index, parameter value)
represents milestones and parameter. Milestones should be increasing integers.
- save_history (bool, optional): whether to log the parameter values to
+ save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
- param_group_index (int, optional): optimizer's parameters group to use.
-
- Returns:
- PiecewiseLinear: piecewise linear scheduler
-
+ param_group_index: optimizer's parameters group to use.
.. code-block:: python
@@ -967,8 +960,9 @@ class ParamGroupScheduler:
Scheduler helper to group multiple schedulers into one.
Args:
- schedulers (list/tuple of ParamScheduler): list/tuple of parameter schedulers.
- names (list of str): list of names of schedulers.
+ schedulers: list/tuple of parameter schedulers.
+ names: list of names of schedulers.
+ save_history: whether to save history or not.
.. code-block:: python
@@ -1055,7 +1049,7 @@ def load_state_dict(self, state_dict: Mapping) -> None:
"""Copies parameters from :attr:`state_dict` into this ParamScheduler.
Args:
- state_dict (dict): a dict containing parameters.
+ state_dict: a dict containing parameters.
"""
if not isinstance(state_dict, Mapping):
raise TypeError(f"Argument state_dict should be a dictionary, but given {type(state_dict)}")
@@ -1083,11 +1077,13 @@ def simulate_values(cls, num_events: int, schedulers: List[_LRScheduler], **kwar
"""Method to simulate scheduled values during num_events events.
Args:
- num_events (int): number of events during the simulation.
- schedulers (subclass of `torch.optim.lr_scheduler._LRScheduler`): lr_scheduler object to wrap.
+ num_events: number of events during the simulation.
+ schedulers: lr_scheduler object to wrap.
+ kwargs: kwargs passed to construct an instance of
+ :class:`ignite.contrib.handlers.param_scheduler.ParamGroupScheduler`.
Returns:
- list of pairs: [event_index, value]
+ event_index, value
"""
diff --git a/ignite/contrib/handlers/polyaxon_logger.py b/ignite/contrib/handlers/polyaxon_logger.py
index 62d3b4629c85..dc2df4183650 100644
--- a/ignite/contrib/handlers/polyaxon_logger.py
+++ b/ignite/contrib/handlers/polyaxon_logger.py
@@ -84,14 +84,14 @@ class PolyaxonLogger(BaseLogger):
)
Args:
- *args: Positional arguments accepted from
+ args: Positional arguments accepted from
`Experiment `_.
- **kwargs: Keyword arguments accepted from
+ kwargs: Keyword arguments accepted from
`Experiment `_.
"""
- def __init__(self, *args: Any, **kwargs: Any) -> None:
+ def __init__(self, *args: Any, **kwargs: Any):
try:
from polyaxon_client.tracking import Experiment
except ImportError:
@@ -174,14 +174,14 @@ def global_step_transform(*args, **kwargs):
)
Args:
- tag (str): common title for all produced plots. For example, "training"
- metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available
+ tag: common title for all produced plots. For example, "training"
+ metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
- output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.
+ output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot
with corresponding keys.
- global_step_transform (callable, optional): global step transform function to output a desired global step.
+ global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
@@ -204,7 +204,7 @@ def __init__(
metric_names: Optional[List[str]] = None,
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable] = None,
- ) -> None:
+ ):
super(OutputHandler, self).__init__(tag, metric_names, output_transform, global_step_transform)
def __call__(self, engine: Engine, logger: PolyaxonLogger, event_name: Union[str, Events]) -> None:
@@ -263,13 +263,13 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler):
)
Args:
- optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups``
+ optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
- param_name (str): parameter name
- tag (str, optional): common title for all produced plots. For example, "generator"
+ param_name: parameter name
+ tag: common title for all produced plots. For example, "generator"
"""
- def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None) -> None:
+ def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
def __call__(self, engine: Engine, logger: PolyaxonLogger, event_name: Union[str, Events]) -> None:
diff --git a/ignite/contrib/handlers/stores.py b/ignite/contrib/handlers/stores.py
index 67e1706fe672..2c35aa5480ff 100644
--- a/ignite/contrib/handlers/stores.py
+++ b/ignite/contrib/handlers/stores.py
@@ -12,7 +12,7 @@ class EpochOutputStore:
larger than available RAM.
Args:
- output_transform (callable, optional): a callable that is used to
+ output_transform: a callable that is used to
transform the :class:`~ignite.engine.engine.Engine`'s
``process_function``'s output , e.g., lambda x: x[0]
@@ -32,7 +32,7 @@ def log_training_results(engine):
.. versionadded:: 0.4.2
"""
- def __init__(self, output_transform: Callable = lambda x: x) -> None:
+ def __init__(self, output_transform: Callable = lambda x: x):
self.data = [] # type: List[Union[int, Tuple[int, int]]]
self.output_transform = output_transform
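A minimal sketch of `EpochOutputStore`: it collects one entry per iteration and the accumulated list is available as `eos.data` at the end of the epoch (toy engine below):

```python
from ignite.engine import Engine, Events
from ignite.contrib.handlers.stores import EpochOutputStore

trainer = Engine(lambda engine, batch: batch * 2)  # toy process function

eos = EpochOutputStore()  # default output_transform keeps the raw output
eos.attach(trainer)

@trainer.on(Events.EPOCH_COMPLETED)
def show_collected_outputs(engine):
    print(f"collected {len(eos.data)} outputs this epoch")

trainer.run([1, 2, 3], max_epochs=2)
```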
diff --git a/ignite/contrib/handlers/tensorboard_logger.py b/ignite/contrib/handlers/tensorboard_logger.py
index bf0cf35cd4c6..75d947370310 100644
--- a/ignite/contrib/handlers/tensorboard_logger.py
+++ b/ignite/contrib/handlers/tensorboard_logger.py
@@ -44,10 +44,10 @@ class TensorboardLogger(BaseLogger):
(>=v1.2.0).
Args:
- *args: Positional arguments accepted from
+ args: Positional arguments accepted from
`SummaryWriter
`_.
- **kwargs: Keyword arguments accepted from
+ kwargs: Keyword arguments accepted from
`SummaryWriter
`_.
For example, `log_dir` to setup path to the directory where to log.
@@ -149,7 +149,7 @@ class TensorboardLogger(BaseLogger):
"""
- def __init__(self, *args: Any, **kwargs: Any) -> None:
+ def __init__(self, *args: Any, **kwargs: Any):
try:
from tensorboardX import SummaryWriter
except ImportError:
@@ -236,14 +236,14 @@ def global_step_transform(*args, **kwargs):
)
Args:
- tag (str): common title for all produced plots. For example, "training"
- metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available
+ tag: common title for all produced plots. For example, "training"
+ metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
- output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.
+ output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot
with corresponding keys.
- global_step_transform (callable, optional): global step transform function to output a desired global step.
+ global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
@@ -266,7 +266,7 @@ def __init__(
metric_names: Optional[List[str]] = None,
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable] = None,
- ) -> None:
+ ):
super(OutputHandler, self).__init__(tag, metric_names, output_transform, global_step_transform)
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, EventEnum]) -> None:
@@ -319,13 +319,13 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler):
)
Args:
- optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups``
+ optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
- param_name (str): parameter name
- tag (str, optional): common title for all produced plots. For example, "generator"
+ param_name: parameter name
+ tag: common title for all produced plots. For example, "generator"
"""
- def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None) -> None:
+ def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:
@@ -365,13 +365,13 @@ class WeightsScalarHandler(BaseWeightsScalarHandler):
)
Args:
- model (torch.nn.Module): model to log weights
- reduction (callable): function to reduce parameters into scalar
- tag (str, optional): common title for all produced plots. For example, "generator"
+ model: model to log weights
+ reduction: function to reduce parameters into scalar
+ tag: common title for all produced plots. For example, "generator"
"""
- def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None) -> None:
+ def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None):
super(WeightsScalarHandler, self).__init__(model, reduction, tag=tag)
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:
@@ -411,8 +411,8 @@ class WeightsHistHandler(BaseWeightsHistHandler):
)
Args:
- model (torch.nn.Module): model to log weights
- tag (str, optional): common title for all produced plots. For example, "generator"
+ model: model to log weights
+ tag: common title for all produced plots. For example, "generator"
"""
@@ -457,13 +457,13 @@ class GradsScalarHandler(BaseWeightsScalarHandler):
)
Args:
- model (torch.nn.Module): model to log weights
- reduction (callable): function to reduce parameters into scalar
- tag (str, optional): common title for all produced plots. For example, "generator"
+ model: model to log weights
+ reduction: function to reduce parameters into scalar
+ tag: common title for all produced plots. For example, "generator"
"""
- def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None) -> None:
+ def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None):
super(GradsScalarHandler, self).__init__(model, reduction, tag=tag)
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:
@@ -502,12 +502,12 @@ class GradsHistHandler(BaseWeightsHistHandler):
)
Args:
- model (torch.nn.Module): model to log weights
- tag (str, optional): common title for all produced plots. For example, "generator"
+ model: model to log weights
+ tag: common title for all produced plots. For example, "generator"
"""
- def __init__(self, model: nn.Module, tag: Optional[str] = None) -> None:
+ def __init__(self, model: nn.Module, tag: Optional[str] = None):
super(GradsHistHandler, self).__init__(model, tag=tag)
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:
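The `global_step_transform` pattern referenced throughout these docstrings is most commonly used to plot validation metrics against the trainer's progress. A hedged sketch, assuming `trainer` and `evaluator` engines already exist:

```python
from ignite.engine import Events
from ignite.contrib.handlers import TensorboardLogger, global_step_from_engine

tb_logger = TensorboardLogger(log_dir="tb-logs")

# plot all of the evaluator's metrics, but use the trainer's global step on the x-axis
tb_logger.attach_output_handler(
    evaluator,
    event_name=Events.COMPLETED,
    tag="validation",
    metric_names="all",
    global_step_transform=global_step_from_engine(trainer),
)
```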
diff --git a/ignite/contrib/handlers/time_profilers.py b/ignite/contrib/handlers/time_profilers.py
index c081fcefb803..4cec5eba2ff5 100644
--- a/ignite/contrib/handlers/time_profilers.py
+++ b/ignite/contrib/handlers/time_profilers.py
@@ -268,6 +268,9 @@ def write_results(self, output_path: str) -> None:
"""
Method to store the unaggregated profiling results to a csv file
+ Args:
+ output_path: file output path containing a filename
+
.. code-block:: python
profiler.write_results('path_to_dir/awesome_filename.csv')
@@ -347,6 +350,9 @@ def print_results(results: Dict) -> str:
"""
Method to print the aggregated results from the profiler
+ Args:
+ results: the aggregated results from the profiler
+
.. code-block:: python
profiler.print_results(results)
@@ -566,6 +572,11 @@ def _as_first_started(self, engine: Engine) -> None:
engine.add_event_handler(Events.COMPLETED, self._detach_profiler_handlers)
def attach(self, engine: Engine) -> None:
+ """Attach HandlersTimeProfiler to the given engine.
+
+ Args:
+ engine: the instance of Engine to attach
+ """
if not isinstance(engine, Engine):
raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")
@@ -628,6 +639,9 @@ def write_results(self, output_path: str) -> None:
"""
Method to store the unaggregated profiling results to a csv file
+ Args:
+ output_path: file output path containing a filename
+
.. code-block:: python
profiler.write_results('path_to_dir/awesome_filename.csv')
@@ -677,6 +691,9 @@ def print_results(results: List[List[Union[str, float]]]) -> None:
"""
Method to print the aggregated results from the profiler
+ Args:
+ results: the aggregated results from the profiler
+
.. code-block:: python
profiler.print_results(results)
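Tying the profiler methods above together, a typical session looks roughly like this (assuming `trainer` and `dataloader` exist):

```python
from ignite.contrib.handlers import HandlersTimeProfiler

profiler = HandlersTimeProfiler()
profiler.attach(trainer)

trainer.run(dataloader, max_epochs=3)

results = profiler.get_results()
HandlersTimeProfiler.print_results(results)      # aggregated table on stdout
profiler.write_results("handlers_profile.csv")   # unaggregated, per-event timings
```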
diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py
index 781b4a1c3cc0..d52280f7216c 100644
--- a/ignite/contrib/handlers/tqdm_logger.py
+++ b/ignite/contrib/handlers/tqdm_logger.py
@@ -14,14 +14,14 @@ class ProgressBar(BaseLogger):
TQDM progress bar handler to log training progress and computed metrics.
Args:
- persist (bool, optional): set to ``True`` to persist the progress bar after completion (default = ``False``)
- bar_format (str, optional): Specify a custom bar string formatting. May impact performance.
+ persist: set to ``True`` to persist the progress bar after completion (default = ``False``)
+        bar_format: Specify a custom bar string formatting. May impact performance.
[default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]'].
Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the
formatting, see `tqdm docs `_.
- **tqdm_kwargs: kwargs passed to tqdm progress bar.
+ tqdm_kwargs: kwargs passed to tqdm progress bar.
By default, progress bar description displays "Epoch [5/10]" where 5 is the current epoch and 10 is the
number of epochs; however, if ``max_epochs`` are set to 1, the progress bar instead displays
"Iteration: [5/10]". If tqdm_kwargs defines `desc`, e.g. "Predictions", than the description is
@@ -105,7 +105,7 @@ def __init__(
persist: bool = False,
bar_format: str = "{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]",
**tqdm_kwargs: Any,
- ) -> None:
+ ):
try:
from tqdm.autonotebook import tqdm
@@ -149,7 +149,7 @@ def log_message(self, message: str) -> None:
Logs a message, preserving the progress bar correct output format.
Args:
- message (str): string you wish to log.
+ message: string you wish to log.
"""
from tqdm import tqdm
@@ -167,10 +167,10 @@ def attach( # type: ignore[override]
Attaches the progress bar to an engine object.
Args:
- engine (Engine): engine object.
- metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available
+ engine: engine object.
+ metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
- output_transform (callable, optional): a function to select what you want to print from the engine's
+ output_transform: a function to select what you want to print from the engine's
output. This function may return either a dictionary with entries in the format of ``{name: value}``,
or a single scalar, which will be displayed with the default name `output`.
event_name: event's name on which the progress bar advances. Valid events are from
@@ -217,10 +217,10 @@ class _OutputHandler(BaseOutputHandler):
"""Helper handler to log engine's output and/or metrics
Args:
- description (str): progress bar description.
- metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available
+ description: progress bar description.
+ metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
- output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.
+ output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{'loss': loss1, 'another_loss': loss2}` to label the plot
with corresponding keys.
@@ -236,7 +236,7 @@ def __init__(
metric_names: Optional[Union[str, List[str]]] = None,
output_transform: Optional[Callable] = None,
closing_event_name: Union[Events, CallableEventWithFilter] = Events.EPOCH_COMPLETED,
- ) -> None:
+ ):
if metric_names is None and output_transform is None:
# This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler
metric_names = []
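A short `ProgressBar` sketch matching the docstrings above (assuming the trainer's `state.output` is a loss value):

```python
from ignite.contrib.handlers import ProgressBar

# persist=True keeps the finished bar on screen instead of clearing it
pbar = ProgressBar(persist=True)

# display the running loss taken from engine.state.output next to the bar
pbar.attach(trainer, output_transform=lambda output: {"loss": output})

pbar.log_message("Training started")
```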
diff --git a/ignite/contrib/handlers/visdom_logger.py b/ignite/contrib/handlers/visdom_logger.py
index bb114bed76ea..f580a18a24a4 100644
--- a/ignite/contrib/handlers/visdom_logger.py
+++ b/ignite/contrib/handlers/visdom_logger.py
@@ -38,12 +38,12 @@ class VisdomLogger(BaseLogger):
pip install git+https://github.com/facebookresearch/visdom.git
Args:
- server (str, optional): visdom server URL. It can be also specified by environment variable `VISDOM_SERVER_URL`
- port (int, optional): visdom server's port. It can be also specified by environment variable `VISDOM_PORT`
- num_workers (int, optional): number of workers to use in `concurrent.futures.ThreadPoolExecutor` to post data to
+ server: visdom server URL. It can also be specified by environment variable `VISDOM_SERVER_URL`
+ port: visdom server's port. It can also be specified by environment variable `VISDOM_PORT`
+ num_workers: number of workers to use in `concurrent.futures.ThreadPoolExecutor` to post data to
visdom server. Default, `num_workers=1`. If `num_workers=0`, the logger uses the main thread. If using
Python 2.7 and `num_workers>0`, the package `futures` should be installed: `pip install futures`
- **kwargs: kwargs to pass into
+ kwargs: kwargs to pass into
`visdom.Visdom `_.
Note:
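# Editor's sketch: constructing the logger with the arguments documented above; the
# server/port values are placeholders (they may also come from VISDOM_SERVER_URL/VISDOM_PORT).
from ignite.contrib.handlers.visdom_logger import VisdomLogger

vd_logger = VisdomLogger(server="http://localhost", port=8097, num_workers=1)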
@@ -209,7 +209,7 @@ def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> "OptimizerPar
class _BaseVisDrawer:
- def __init__(self, show_legend: bool = False) -> None:
+ def __init__(self, show_legend: bool = False):
self.windows = {} # type: Dict[str, Any]
self.show_legend = show_legend
@@ -220,13 +220,13 @@ def add_scalar(
Helper method to log a scalar with VisdomLogger.
Args:
- logger (VisdomLogger): visdom logger
- k (str): scalar name which is used to set window title and y-axis label
- v (int or float): scalar value, y-axis value
+ logger: visdom logger
+ k: scalar name which is used to set window title and y-axis label
+ v: scalar value, y-axis value
event_name: Event name which is used to setup x-axis label. Valid events are from
:class:`~ignite.engine.events.Events` or any `event_name` added by
:meth:`~ignite.engine.engine.Engine.register_events`.
- global_step (int): global step, x-axis value
+ global_step: global step, x-axis value
"""
if k not in self.windows:
@@ -314,19 +314,19 @@ def global_step_transform(*args, **kwargs):
)
Args:
- tag (str): common title for all produced plots. For example, "training"
- metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available
+ tag: common title for all produced plots. For example, "training"
+ metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
- output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.
+ output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot
with corresponding keys.
- global_step_transform (callable, optional): global step transform function to output a desired global step.
+ global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, in which case global_step is taken from the attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
:meth:`~ignite.contrib.handlers.visdom_logger.global_step_from_engine`.
- show_legend (bool, optional): flag to show legend in the window
+ show_legend: flag to show legend in the window
Note:
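# Editor's sketch: attaching the OutputHandler documented above; assumes `trainer` and
# `evaluator` Engines and the `vd_logger` from the previous sketch already exist.
from ignite.engine import Events
from ignite.contrib.handlers.visdom_logger import OutputHandler, global_step_from_engine

vd_logger.attach(
    evaluator,
    log_handler=OutputHandler(
        tag="validation",
        metric_names="all",
        global_step_transform=global_step_from_engine(trainer),  # x-axis follows trainer steps
        show_legend=True,
    ),
    event_name=Events.EPOCH_COMPLETED,
)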
@@ -346,7 +346,7 @@ def __init__(
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable] = None,
show_legend: bool = False,
- ) -> None:
+ ):
super(OutputHandler, self).__init__(tag, metric_names, output_transform, global_step_transform)
_BaseVisDrawer.__init__(self, show_legend=show_legend)
@@ -411,16 +411,16 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler, _BaseVisDrawer):
)
Args:
- optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups``
+ optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
- param_name (str): parameter name
- tag (str, optional): common title for all produced plots. For example, "generator"
- show_legend (bool, optional): flag to show legend in the window
+ param_name: parameter name
+ tag: common title for all produced plots. For example, "generator"
+ show_legend: flag to show legend in the window
"""
def __init__(
self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None, show_legend: bool = False,
- ) -> None:
+ ):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
_BaseVisDrawer.__init__(self, show_legend=show_legend)
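# Editor's sketch: logging the learning rate of an optimizer; assumes `optimizer` is a
# torch.optim.Optimizer and that `trainer`/`vd_logger` exist as in the sketches above.
from ignite.engine import Events
from ignite.contrib.handlers.visdom_logger import OptimizerParamsHandler

vd_logger.attach(
    trainer,
    log_handler=OptimizerParamsHandler(optimizer, param_name="lr", tag="generator"),
    event_name=Events.ITERATION_STARTED,
)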
@@ -463,15 +463,15 @@ class WeightsScalarHandler(BaseWeightsScalarHandler, _BaseVisDrawer):
)
Args:
- model (torch.nn.Module): model to log weights
- reduction (callable): function to reduce parameters into scalar
- tag (str, optional): common title for all produced plots. For example, "generator"
- show_legend (bool, optional): flag to show legend in the window
+ model: model to log weights
+ reduction: function to reduce parameters into scalar
+ tag: common title for all produced plots. For example, "generator"
+ show_legend: flag to show legend in the window
"""
def __init__(
self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None, show_legend: bool = False,
- ) -> None:
+ ):
super(WeightsScalarHandler, self).__init__(model, reduction, tag=tag)
_BaseVisDrawer.__init__(self, show_legend=show_legend)
@@ -513,16 +513,16 @@ class GradsScalarHandler(BaseWeightsScalarHandler, _BaseVisDrawer):
)
Args:
- model (torch.nn.Module): model to log weights
- reduction (callable): function to reduce parameters into scalar
- tag (str, optional): common title for all produced plots. For example, "generator"
- show_legend (bool, optional): flag to show legend in the window
+ model: model to log weights
+ reduction: function to reduce parameters into scalar
+ tag: common title for all produced plots. For example, "generator"
+ show_legend: flag to show legend in the window
"""
def __init__(
self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None, show_legend: bool = False,
- ) -> None:
+ ):
super(GradsScalarHandler, self).__init__(model, reduction, tag)
_BaseVisDrawer.__init__(self, show_legend=show_legend)
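# Editor's sketch: the weight/gradient norm handlers documented above; assumes `model` is a
# torch.nn.Module and that `trainer`/`vd_logger` exist as in the earlier sketches.
import torch
from ignite.engine import Events
from ignite.contrib.handlers.visdom_logger import GradsScalarHandler, WeightsScalarHandler

vd_logger.attach(
    trainer,
    log_handler=WeightsScalarHandler(model, reduction=torch.norm, show_legend=True),
    event_name=Events.ITERATION_COMPLETED,
)
vd_logger.attach(
    trainer,
    log_handler=GradsScalarHandler(model, reduction=torch.norm),
    event_name=Events.ITERATION_COMPLETED,
)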
diff --git a/ignite/contrib/handlers/wandb_logger.py b/ignite/contrib/handlers/wandb_logger.py
index 4880523ee089..9b55e827a3c4 100644
--- a/ignite/contrib/handlers/wandb_logger.py
+++ b/ignite/contrib/handlers/wandb_logger.py
@@ -21,8 +21,8 @@ class WandBLogger(BaseLogger):
this wrapper. See examples on how to save model parameters and gradients.
Args:
- *args: Positional arguments accepted by `wandb.init`.
- **kwargs: Keyword arguments accepted by `wandb.init`.
+ args: Positional arguments accepted by `wandb.init`.
+ kwargs: Keyword arguments accepted by `wandb.init`.
Please see `wandb.init `_ for documentation of possible parameters.
Examples:
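# Editor's sketch (not one of the library's documented examples): constructing the logger;
# `project` and `name` are ordinary wandb.init keyword arguments chosen for illustration.
from ignite.contrib.handlers.wandb_logger import WandBLogger

wandb_logger = WandBLogger(project="pytorch-ignite-demo", name="example-run")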
@@ -116,7 +116,7 @@ def score_function(engine):
evaluator.add_event_handler(Events.COMPLETED, model_checkpoint, {'model': model})
"""
- def __init__(self, *args: Any, **kwargs: Any) -> None:
+ def __init__(self, *args: Any, **kwargs: Any):
try:
import wandb
@@ -218,19 +218,19 @@ def global_step_transform(*args, **kwargs):
)
Args:
- tag (str): common title for all produced plots. For example, "training"
- metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available
+ tag: common title for all produced plots. For example, "training"
+ metric_names: list of metric names to plot or a string "all" to plot all available
metrics.
- output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.
+ output_transform: output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot
with corresponding keys.
- global_step_transform (callable, optional): global step transform function to output a desired global step.
+ global_step_transform: global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, in which case global_step is taken from the attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
:meth:`~ignite.contrib.handlers.wandb_logger.global_step_from_engine`.
- sync (bool, optional): If set to False, process calls to log in a seperate thread. Default (None) uses whatever
+ sync: If set to False, calls to log are processed in a separate thread. Default (None) uses whatever
the default value of wandb.log is.
Note:
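# Editor's sketch: the handler documented above, with sync=False so wandb.log calls run in
# a separate thread; assumes `trainer`, `evaluator`, and `wandb_logger` already exist.
from ignite.engine import Events
from ignite.contrib.handlers.wandb_logger import OutputHandler, global_step_from_engine

wandb_logger.attach(
    evaluator,
    log_handler=OutputHandler(
        tag="validation",
        metric_names="all",
        global_step_transform=global_step_from_engine(trainer),
        sync=False,
    ),
    event_name=Events.EPOCH_COMPLETED,
)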
@@ -251,7 +251,7 @@ def __init__(
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable] = None,
sync: Optional[bool] = None,
- ) -> None:
+ ):
super().__init__(tag, metric_names, output_transform, global_step_transform)
self.sync = sync
@@ -308,17 +308,17 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler):
)
Args:
- optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups``
+ optimizer: torch optimizer or any object with attribute ``param_groups``
as a sequence.
- param_name (str): parameter name
- tag (str, optional): common title for all produced plots. For example, "generator"
- sync (bool, optional): If set to False, process calls to log in a seperate thread. Default (None) uses whatever
+ param_name: parameter name
+ tag: common title for all produced plots. For example, "generator"
+ sync: If set to False, calls to log are processed in a separate thread. Default (None) uses whatever
the default value of wandb.log is.
"""
def __init__(
self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None, sync: Optional[bool] = None,
- ) -> None:
+ ):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
self.sync = sync
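# Editor's sketch: the same handler attached via the attach_opt_params_handler shortcut on
# the base logger; assumes `trainer`, `optimizer`, and `wandb_logger` exist as above.
from ignite.engine import Events

wandb_logger.attach_opt_params_handler(
    trainer,
    event_name=Events.ITERATION_STARTED,
    optimizer=optimizer,
    param_name="lr",
)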