From a94b50e8ec85cc1d81cec9e9f28edaf1725ee34b Mon Sep 17 00:00:00 2001 From: ydcjeff Date: Tue, 23 Feb 2021 16:14:17 +0630 Subject: [PATCH 1/3] docs: rm type hints in ignite.metrics --- docs/requirements.txt | 2 +- docs/source/conf.py | 1 + ignite/metrics/accumulation.py | 14 +++--- ignite/metrics/accuracy.py | 6 +-- ignite/metrics/confusion_matrix.py | 30 ++++++------ ignite/metrics/epoch_metric.py | 8 ++-- ignite/metrics/fbeta.py | 12 ++--- ignite/metrics/loss.py | 8 ++-- ignite/metrics/mean_absolute_error.py | 10 ++++ ignite/metrics/mean_pairwise_distance.py | 12 +++++ ignite/metrics/mean_squared_error.py | 10 ++++ ignite/metrics/metric.py | 46 ++++++++++--------- ignite/metrics/metrics_lambda.py | 6 ++- ignite/metrics/multilabel_confusion_matrix.py | 7 +-- ignite/metrics/precision.py | 8 ++-- ignite/metrics/psnr.py | 6 +-- ignite/metrics/recall.py | 8 ++-- ignite/metrics/root_mean_squared_error.py | 10 ++++ ignite/metrics/running_average.py | 10 ++-- ignite/metrics/ssim.py | 16 +++---- ignite/metrics/top_k_categorical_accuracy.py | 11 +++++ 21 files changed, 150 insertions(+), 91 deletions(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index 4c94a44e1dc8..f7b4ab9b632e 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,3 +1,3 @@ -sphinx==3.2.1 +sphinx==3.4 -e git+https://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme sphinxcontrib-katex diff --git a/docs/source/conf.py b/docs/source/conf.py index a69170d460c4..2c8c5e660d30 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -208,6 +208,7 @@ autoclass_content = "both" autodoc_typehints = "description" +napoleon_attr_annotations = True # -- A patch that turns-off cross refs for type annotations ------------------ diff --git a/ignite/metrics/accumulation.py b/ignite/metrics/accumulation.py index c26d1758d79f..6a1d7091d58a 100644 --- a/ignite/metrics/accumulation.py +++ b/ignite/metrics/accumulation.py @@ -25,13 +25,13 @@ class VariableAccumulation(Metric): - `+batch_size` if input is a ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`). Args: - op (callable): a callable to update accumulator. Method's signature is `(accumulator, output)`. + op: a callable to update accumulator. Method's signature is `(accumulator, output)`. For example, to compute arithmetic mean value, `op = lambda a, x: a + x`. - output_transform (callable, optional): a callable that is used to transform the + output_transform: a callable that is used to transform the :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the form expected by the metric. This can be useful if, for example, you have a multi-output model and you want to compute the metric with respect to one of the outputs. - device (str or torch.device): specifies which device updates are accumulated on. Setting the metric's + device: specifies which device updates are accumulated on. Setting the metric's device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By default, CPU. @@ -112,11 +112,11 @@ class Average(VariableAccumulation): # state.metrics['mean_custom_var'] -> average of output['custom_var'] Args: - output_transform (callable, optional): a callable that is used to transform the + output_transform: a callable that is used to transform the :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the form expected by the metric. 
This can be useful if, for example, you have a multi-output model and you want to compute the metric with respect to one of the outputs. - device (str or torch.device): specifies which device updates are accumulated on. Setting the metric's + device: specifies which device updates are accumulated on. Setting the metric's device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By default, CPU. """ @@ -159,11 +159,11 @@ class GeometricAverage(VariableAccumulation): is aggregated and added to the accumulator: `accumulator *= prod(x, dim=0)` Args: - output_transform (callable, optional): a callable that is used to transform the + output_transform: a callable that is used to transform the :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the form expected by the metric. This can be useful if, for example, you have a multi-output model and you want to compute the metric with respect to one of the outputs. - device (str or torch.device): specifies which device updates are accumulated on. Setting the metric's + device: specifies which device updates are accumulated on. Setting the metric's device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By default, CPU. diff --git a/ignite/metrics/accuracy.py b/ignite/metrics/accuracy.py index 460fec31688e..542adb16ec93 100644 --- a/ignite/metrics/accuracy.py +++ b/ignite/metrics/accuracy.py @@ -119,12 +119,12 @@ def thresholded_output_transform(output): Args: - output_transform (callable, optional): a callable that is used to transform the + output_transform: a callable that is used to transform the :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the form expected by the metric. This can be useful if, for example, you have a multi-output model and you want to compute the metric with respect to one of the outputs. - is_multilabel (bool, optional): flag to use in multilabel case. By default, False. - device (str or torch.device): specifies which device updates are accumulated on. Setting the metric's + is_multilabel: flag to use in multilabel case. By default, False. + device: specifies which device updates are accumulated on. Setting the metric's device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By default, CPU. diff --git a/ignite/metrics/confusion_matrix.py b/ignite/metrics/confusion_matrix.py index d4fba5d4365d..a1305eb38c5d 100644 --- a/ignite/metrics/confusion_matrix.py +++ b/ignite/metrics/confusion_matrix.py @@ -21,17 +21,17 @@ class ConfusionMatrix(Metric): predicted classes. Args: - num_classes (int): Number of classes, should be > 1. See notes for more details. - average (str, optional): confusion matrix values averaging schema: None, "samples", "recall", "precision". + num_classes: Number of classes, should be > 1. See notes for more details. + average: confusion matrix values averaging schema: None, "samples", "recall", "precision". Default is None. If `average="samples"` then confusion matrix values are normalized by the number of seen samples. If `average="recall"` then confusion matrix values are normalized such that diagonal values represent class recalls. If `average="precision"` then confusion matrix values are normalized such that diagonal values represent class precisions. 
- output_transform (callable, optional): a callable that is used to transform the + output_transform: a callable that is used to transform the :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the form expected by the metric. This can be useful if, for example, you have a multi-output model and you want to compute the metric with respect to one of the outputs. - device (str or torch.device): specifies which device updates are accumulated on. Setting the metric's + device: specifies which device updates are accumulated on. Setting the metric's device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By default, CPU. @@ -161,8 +161,8 @@ def IoU(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> MetricsLambd .. math:: \text{J}(A, B) = \frac{ \lvert A \cap B \rvert }{ \lvert A \cup B \rvert } Args: - cm (ConfusionMatrix): instance of confusion matrix metric - ignore_index (int, optional): index to ignore, e.g. background index + cm: instance of confusion matrix metric + ignore_index: index to ignore, e.g. background index Returns: MetricsLambda @@ -212,8 +212,8 @@ def mIoU(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> MetricsLamb """Calculates mean Intersection over Union using :class:`~ignite.metrics.ConfusionMatrix` metric. Args: - cm (ConfusionMatrix): instance of confusion matrix metric - ignore_index (int, optional): index to ignore, e.g. background index + cm: instance of confusion matrix metric + ignore_index: index to ignore, e.g. background index Returns: MetricsLambda @@ -240,7 +240,7 @@ def cmAccuracy(cm: ConfusionMatrix) -> MetricsLambda: """Calculates accuracy using :class:`~ignite.metrics.ConfusionMatrix` metric. Args: - cm (ConfusionMatrix): instance of confusion matrix metric + cm: instance of confusion matrix metric Returns: MetricsLambda @@ -255,8 +255,8 @@ def cmPrecision(cm: ConfusionMatrix, average: bool = True) -> MetricsLambda: """Calculates precision using :class:`~ignite.metrics.ConfusionMatrix` metric. Args: - cm (ConfusionMatrix): instance of confusion matrix metric - average (bool, optional): if True metric value is averaged over all classes + cm: instance of confusion matrix metric + average: if True metric value is averaged over all classes Returns: MetricsLambda """ @@ -274,8 +274,8 @@ def cmRecall(cm: ConfusionMatrix, average: bool = True) -> MetricsLambda: """ Calculates recall using :class:`~ignite.metrics.ConfusionMatrix` metric. Args: - cm (ConfusionMatrix): instance of confusion matrix metric - average (bool, optional): if True metric value is averaged over all classes + cm: instance of confusion matrix metric + average: if True metric value is averaged over all classes Returns: MetricsLambda """ @@ -293,8 +293,8 @@ def DiceCoefficient(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> """Calculates Dice Coefficient for a given :class:`~ignite.metrics.ConfusionMatrix` metric. Args: - cm (ConfusionMatrix): instance of confusion matrix metric - ignore_index (int, optional): index to ignore, e.g. background index + cm: instance of confusion matrix metric + ignore_index: index to ignore, e.g. background index """ if not isinstance(cm, ConfusionMatrix): diff --git a/ignite/metrics/epoch_metric.py b/ignite/metrics/epoch_metric.py index cc8f3ec4a7c0..2ad5511dbbb6 100644 --- a/ignite/metrics/epoch_metric.py +++ b/ignite/metrics/epoch_metric.py @@ -31,17 +31,17 @@ class EpochMetric(Metric): e.g. ``[[0, 1, 0, 1], ]``. 
Args: - compute_fn (callable): a callable with the signature (`torch.tensor`, `torch.tensor`) takes as the input + compute_fn: a callable with the signature (`torch.tensor`, `torch.tensor`) takes as the input `predictions` and `targets` and returns a scalar. Input tensors will be on specified ``device`` (see arg below). - output_transform (callable, optional): a callable that is used to transform the + output_transform: a callable that is used to transform the :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the form expected by the metric. This can be useful if, for example, you have a multi-output model and you want to compute the metric with respect to one of the outputs. - check_compute_fn (bool): if True, ``compute_fn`` is run on the first batch of data to ensure there are no + check_compute_fn: if True, ``compute_fn`` is run on the first batch of data to ensure there are no issues. If issues exist, user is warned that there might be an issue with the ``compute_fn``. Default, True. - device (str or torch.device, optional): optional device specification for internal storage. + device: optional device specification for internal storage. Warnings: EpochMetricWarning: User is warned that there are issues with ``compute_fn`` on a batch of data processed. diff --git a/ignite/metrics/fbeta.py b/ignite/metrics/fbeta.py index b417445c10ae..65f18a4e28cd 100644 --- a/ignite/metrics/fbeta.py +++ b/ignite/metrics/fbeta.py @@ -26,15 +26,15 @@ def Fbeta( where :math:`\beta` is a positive real factor. Args: - beta (float): weight of precision in harmonic mean - average (bool, optional): if True, F-beta score is computed as the unweighted average (across all classes + beta: weight of precision in harmonic mean + average: if True, F-beta score is computed as the unweighted average (across all classes in multiclass case), otherwise, returns a tensor with F-beta score for each class in multiclass case. - precision (Precision, optional): precision object metric with `average=False` to compute F-beta score - recall (Precision, optional): recall object metric with `average=False` to compute F-beta score - output_transform (callable, optional): a callable that is used to transform the + precision: precision object metric with `average=False` to compute F-beta score + recall: recall object metric with `average=False` to compute F-beta score + output_transform: a callable that is used to transform the :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the form expected by the metric. It is used only if precision or recall are not provided. - device (str or torch.device): specifies which device updates are accumulated on. Setting the metric's + device: specifies which device updates are accumulated on. Setting the metric's device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By default, CPU. diff --git a/ignite/metrics/loss.py b/ignite/metrics/loss.py index 18fc34a36d87..b7a323fc198b 100644 --- a/ignite/metrics/loss.py +++ b/ignite/metrics/loss.py @@ -13,10 +13,10 @@ class Loss(Metric): Calculates the average loss according to the passed loss_fn. Args: - loss_fn (callable): a callable taking a prediction tensor, a target + loss_fn: a callable taking a prediction tensor, a target tensor, optionally other arguments, and returns the average loss over all observations in the batch. 
- output_transform (callable): a callable that is used to transform the + output_transform: a callable that is used to transform the :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the form expected by the metric. This can be useful if, for example, you have a multi-output model and @@ -24,9 +24,9 @@ class Loss(Metric): The output is expected to be a tuple `(prediction, target)` or (prediction, target, kwargs) where kwargs is a dictionary of extra keywords arguments. If extra keywords arguments are provided they are passed to `loss_fn`. - batch_size (callable): a callable taking a target tensor that returns the + batch_size: a callable taking a target tensor that returns the first dimension size (usually the batch size). - device (str or torch.device): specifies which device updates are accumulated on. Setting the + device: specifies which device updates are accumulated on. Setting the metric's device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By default, CPU. diff --git a/ignite/metrics/mean_absolute_error.py b/ignite/metrics/mean_absolute_error.py index 461ed54f8ab2..f93b08453ce4 100644 --- a/ignite/metrics/mean_absolute_error.py +++ b/ignite/metrics/mean_absolute_error.py @@ -16,6 +16,16 @@ class MeanAbsoluteError(Metric): where :math:`y_{i}` is the prediction tensor and :math:`x_{i}` is ground true tensor. - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. + + Args: + output_transform: a callable that is used to transform the + :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the + form expected by the metric. This can be useful if, for example, you have a multi-output model and + you want to compute the metric with respect to one of the outputs. + By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. + device: specifies which device updates are accumulated on. Setting the + metric's device to be the same as your ``update`` arguments ensures the ``update`` method is + non-blocking. By default, CPU. """ @reinit__is_reduced diff --git a/ignite/metrics/mean_pairwise_distance.py b/ignite/metrics/mean_pairwise_distance.py index 9751e8797a42..fc3a9ad68ae1 100644 --- a/ignite/metrics/mean_pairwise_distance.py +++ b/ignite/metrics/mean_pairwise_distance.py @@ -14,6 +14,18 @@ class MeanPairwiseDistance(Metric): Average of pairwise distances computed on provided batches. - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. + + Args: + p: the norm degree. Default: 2 + eps: Small value to avoid division by zero. Default: 1e-6 + output_transform: a callable that is used to transform the + :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the + form expected by the metric. This can be useful if, for example, you have a multi-output model and + you want to compute the metric with respect to one of the outputs. + By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. + device: specifies which device updates are accumulated on. Setting the + metric's device to be the same as your ``update`` arguments ensures the ``update`` method is + non-blocking. By default, CPU. 
""" def __init__( diff --git a/ignite/metrics/mean_squared_error.py b/ignite/metrics/mean_squared_error.py index 682b56756bf0..131c04b2efd6 100644 --- a/ignite/metrics/mean_squared_error.py +++ b/ignite/metrics/mean_squared_error.py @@ -16,6 +16,16 @@ class MeanSquaredError(Metric): where :math:`y_{i}` is the prediction tensor and :math:`x_{i}` is ground true tensor. - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. + + Args: + output_transform: a callable that is used to transform the + :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the + form expected by the metric. This can be useful if, for example, you have a multi-output model and + you want to compute the metric with respect to one of the outputs. + By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. + device: specifies which device updates are accumulated on. Setting the + metric's device to be the same as your ``update`` arguments ensures the ``update`` method is + non-blocking. By default, CPU. """ @reinit__is_reduced diff --git a/ignite/metrics/metric.py b/ignite/metrics/metric.py index 053d7d0224ba..02e2acba7de9 100644 --- a/ignite/metrics/metric.py +++ b/ignite/metrics/metric.py @@ -60,10 +60,10 @@ class EpochWise(MetricUsage): - :meth:`~ignite.metrics.Metric.completed` on every ``EPOCH_COMPLETED``. Attributes: - usage_name (str): usage name string + usage_name: usage name string """ - usage_name = "epoch_wise" + usage_name: str = "epoch_wise" def __init__(self) -> None: super(EpochWise, self).__init__( @@ -84,10 +84,10 @@ class BatchWise(MetricUsage): - :meth:`~ignite.metrics.Metric.completed` on every ``ITERATION_COMPLETED``. Attributes: - usage_name (str): usage name string + usage_name: usage name string """ - usage_name = "batch_wise" + usage_name: str = "batch_wise" def __init__(self) -> None: super(BatchWise, self).__init__( @@ -108,8 +108,8 @@ class BatchFiltered(MetricUsage): - :meth:`~ignite.metrics.Metric.completed` on every ``EPOCH_COMPLETED``. Args: - *args: Positional arguments to setup :attr:`~ignite.engine.events.Events.ITERATION_COMPLETED(*args, **kwargs)` - **kwargs: Keyword arguments to setup :attr:`~ignite.engine.events.Events.ITERATION_COMPLETED(*args, **kwargs)` + args: Positional arguments to setup :attr:`~ignite.engine.events.Events.ITERATION_COMPLETED(*args, **kwargs)` + kwargs: Keyword arguments to setup :attr:`~ignite.engine.events.Events.ITERATION_COMPLETED(*args, **kwargs)` handled by :meth:`~ignite.metrics.Metric.iteration_completed`. """ @@ -127,17 +127,17 @@ class Metric(metaclass=ABCMeta): Base class for all Metrics. Args: - output_transform (callable, optional): a callable that is used to transform the + output_transform: a callable that is used to transform the :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the form expected by the metric. This can be useful if, for example, you have a multi-output model and you want to compute the metric with respect to one of the outputs. By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. - device (str or torch.device): specifies which device updates are accumulated on. Setting the + device: specifies which device updates are accumulated on. Setting the metric's device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By default, CPU. 
Attributes: - required_output_keys (tuple): dictionary defines required keys to be found in ``engine.state.output`` if the + required_output_keys: dictionary defines required keys to be found in ``engine.state.output`` if the latter is a dictionary. Default, ``("y_pred", "y")``. This is useful with custom metrics that can require other arguments than predictions ``y_pred`` and targets ``y``. See notes below for an example. @@ -196,7 +196,7 @@ def compute(self): """ # public class attribute - required_output_keys = ("y_pred", "y") # type: Optional[Tuple] + required_output_keys: Optional[Tuple] = ("y_pred", "y") # for backward compatibility _required_output_keys = required_output_keys @@ -267,7 +267,7 @@ def started(self, engine: Engine) -> None: `engine` with :meth:`~ignite.metrics.Metric.attach`. Args: - engine (Engine): the engine to which the metric must be attached + engine: the engine to which the metric must be attached """ self.reset() @@ -277,7 +277,7 @@ def iteration_completed(self, engine: Engine) -> None: `engine` with :meth:`~ignite.metrics.Metric.attach`. Args: - engine (Engine): the engine to which the metric must be attached + engine: the engine to which the metric must be attached """ output = self._output_transform(engine.state.output) @@ -300,8 +300,8 @@ def completed(self, engine: Engine, name: str) -> None: `engine` with :meth:`~ignite.metrics.Metric.attach`. Args: - engine (Engine): the engine to which the metric must be attached - name (str): the name of the metric used as key in dict `engine.state.metrics` + engine: the engine to which the metric must be attached + name: the name of the metric used as key in dict `engine.state.metrics` .. versionchanged:: 0.4.3 Added dict in metrics results. @@ -338,9 +338,9 @@ def attach(self, engine: Engine, name: str, usage: Union[str, MetricUsage] = Epo contain computed metric's value under provided name. Args: - engine (Engine): the engine to which the metric must be attached - name (str): the name of the metric to attach - usage (str or MetricUsage, optional): the usage of the metric. Valid string values should be + engine: the engine to which the metric must be attached + name: the name of the metric to attach + usage: the usage of the metric. Valid string values should be :attr:`ignite.metrics.EpochWise.usage_name` (default) or :attr:`ignite.metrics.BatchWise.usage_name`. @@ -381,8 +381,8 @@ def detach(self, engine: Engine, usage: Union[str, MetricUsage] = EpochWise()) - and another metric (e.g. more expensive one) is done every n-th training epoch. Args: - engine (Engine): the engine from which the metric must be detached - usage (str or MetricUsage, optional): the usage of the metric. Valid string values should be + engine: the engine from which the metric must be detached + usage: the usage of the metric. Valid string values should be 'epoch_wise' (default) or 'batch_wise'. Example: @@ -423,8 +423,8 @@ def is_attached(self, engine: Engine, usage: Union[str, MetricUsage] = EpochWise value is written to `engine.state.metrics` dictionary. Args: - engine (Engine): the engine checked from which the metric should be attached - usage (str or MetricUsage, optional): the usage of the metric. Valid string values should be + engine: the engine checked from which the metric should be attached + usage: the usage of the metric. Valid string values should be 'epoch_wise' (default) or 'batch_wise'. """ usage = self._check_usage(usage) @@ -530,7 +530,7 @@ def sync_all_reduce(*attrs: Any) -> Callable: See :doc:`metrics` on how to use it. 
Args: - *attrs: attribute names of decorated class + attrs: attribute names of decorated class """ @@ -566,6 +566,8 @@ def reinit__is_reduced(func: Callable) -> Callable: See :doc:`metrics` on how to use it. + Args: + func: A callable to reinit. """ @wraps(func) diff --git a/ignite/metrics/metrics_lambda.py b/ignite/metrics/metrics_lambda.py index 6514b71cfd00..eb41e101bb11 100644 --- a/ignite/metrics/metrics_lambda.py +++ b/ignite/metrics/metrics_lambda.py @@ -21,9 +21,11 @@ class MetricsLambda(Metric): automatically (but partially, e.g :meth:`~ignite.metrics.Metric.is_attached()` will return False). Args: - f (callable): the function that defines the computation - args (sequence): Sequence of other metrics or something + f: the function that defines the computation + args: Sequence of other metrics or something else that will be fed to ``f`` as arguments. + kwargs: Sequence of other metrics or something + else that will be fed to ``f`` as keyword arguments. Example: diff --git a/ignite/metrics/multilabel_confusion_matrix.py b/ignite/metrics/multilabel_confusion_matrix.py index 20f13fb7e525..673a154624ef 100644 --- a/ignite/metrics/multilabel_confusion_matrix.py +++ b/ignite/metrics/multilabel_confusion_matrix.py @@ -30,14 +30,15 @@ class MultiLabelConfusionMatrix(Metric): - The classes present in M are indexed as 0, ... , num_classes-1 as can be inferred from above. Args: - num_classes (int): Number of classes, should be > 1. - output_transform (callable, optional): a callable that is used to transform the + num_classes: Number of classes, should be > 1. + output_transform: a callable that is used to transform the :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the form expected by the metric. This can be useful if, for example, you have a multi-output model and you want to compute the metric with respect to one of the outputs. - device (str or torch.device): specifies which device updates are accumulated on. Setting the metric's + device: specifies which device updates are accumulated on. Setting the metric's device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By default, CPU. + normalized: whether to normalized confusion matrix by its sum or not. .. versionadded:: 0.5.0 diff --git a/ignite/metrics/precision.py b/ignite/metrics/precision.py index 50142b7f78a3..ed31eebe27c7 100644 --- a/ignite/metrics/precision.py +++ b/ignite/metrics/precision.py @@ -103,15 +103,15 @@ def thresholded_output_transform(output): Args: - output_transform (callable, optional): a callable that is used to transform the + output_transform: a callable that is used to transform the :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the form expected by the metric. This can be useful if, for example, you have a multi-output model and you want to compute the metric with respect to one of the outputs. - average (bool, optional): if True, precision is computed as the unweighted average (across all classes + average: if True, precision is computed as the unweighted average (across all classes in multiclass case), otherwise, returns a tensor with the precision (for each class in multiclass case). - is_multilabel (bool, optional) flag to use in multilabel case. By default, value is False. If True, average + is_multilabel: flag to use in multilabel case. By default, value is False. If True, average parameter should be True and the average is computed across samples, instead of classes. 
- device (str or torch.device): specifies which device updates are accumulated on. Setting the metric's + device: specifies which device updates are accumulated on. Setting the metric's device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By default, CPU. diff --git a/ignite/metrics/psnr.py b/ignite/metrics/psnr.py index 8c00dba78725..7e930abb4ef6 100644 --- a/ignite/metrics/psnr.py +++ b/ignite/metrics/psnr.py @@ -20,12 +20,12 @@ class PSNR(Metric): - `y_pred` and `y` **must** have same dtype and same shape. Args: - data_range (int or float): The data range of the target image (distance between minimum + data_range: The data range of the target image (distance between minimum and maximum possible values). For other data types, please set the data range, otherwise an exception will be raised. - output_transform (callable, optional): A callable that is used to transform the Engine’s + output_transform: A callable that is used to transform the Engine’s process_function’s output into the form expected by the metric. - device (str or torch.device): specifies which device updates are accumulated on. + device: specifies which device updates are accumulated on. Setting the metric’s device to be the same as your update arguments ensures the update method is non-blocking. By default, CPU. diff --git a/ignite/metrics/recall.py b/ignite/metrics/recall.py index a11cb7d583bf..9ef4316c8806 100644 --- a/ignite/metrics/recall.py +++ b/ignite/metrics/recall.py @@ -50,15 +50,15 @@ def thresholded_output_transform(output): Args: - output_transform (callable, optional): a callable that is used to transform the + output_transform: a callable that is used to transform the :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the form expected by the metric. This can be useful if, for example, you have a multi-output model and you want to compute the metric with respect to one of the outputs. - average (bool, optional): if True, precision is computed as the unweighted average (across all classes + average: if True, precision is computed as the unweighted average (across all classes in multiclass case), otherwise, returns a tensor with the precision (for each class in multiclass case). - is_multilabel (bool, optional) flag to use in multilabel case. By default, value is False. If True, average + is_multilabel: flag to use in multilabel case. By default, value is False. If True, average parameter should be True and the average is computed across samples, instead of classes. - device (str or torch.device): specifies which device updates are accumulated on. Setting the metric's + device: specifies which device updates are accumulated on. Setting the metric's device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By default, CPU. diff --git a/ignite/metrics/root_mean_squared_error.py b/ignite/metrics/root_mean_squared_error.py index 0197fcceca34..47722862970b 100644 --- a/ignite/metrics/root_mean_squared_error.py +++ b/ignite/metrics/root_mean_squared_error.py @@ -16,6 +16,16 @@ class RootMeanSquaredError(MeanSquaredError): where :math:`y_{i}` is the prediction tensor and :math:`x_{i}` is ground true tensor. - ``update`` must receive output of the form (y_pred, y) or `{'y_pred': y_pred, 'y': y}`. + + Args: + output_transform: a callable that is used to transform the + :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the + form expected by the metric. 
This can be useful if, for example, you have a multi-output model and + you want to compute the metric with respect to one of the outputs. + By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. + device: specifies which device updates are accumulated on. Setting the + metric's device to be the same as your ``update`` arguments ensures the ``update`` method is + non-blocking. By default, CPU. """ def compute(self) -> Union[torch.Tensor, float]: diff --git a/ignite/metrics/running_average.py b/ignite/metrics/running_average.py index db36b1a90483..d5c251d2b859 100644 --- a/ignite/metrics/running_average.py +++ b/ignite/metrics/running_average.py @@ -13,14 +13,14 @@ class RunningAverage(Metric): """Compute running average of a metric or the output of process function. Args: - src (Metric or None): input source: an instance of :class:`~ignite.metrics.Metric` or None. The latter + src: input source: an instance of :class:`~ignite.metrics.Metric` or None. The latter corresponds to `engine.state.output` which holds the output of process function. - alpha (float, optional): running average decay factor, default 0.98 - output_transform (callable, optional): a function to use to transform the output if `src` is None and + alpha: running average decay factor, default 0.98 + output_transform: a function to use to transform the output if `src` is None and corresponds the output of process function. Otherwise it should be None. - epoch_bound (boolean, optional): whether the running average should be reset after each epoch (defaults + epoch_bound: whether the running average should be reset after each epoch (defaults to True). - device (str or torch.device, optional): specifies which device updates are accumulated on. Should be + device: specifies which device updates are accumulated on. Should be None when ``src`` is an instance of :class:`~ignite.metrics.Metric`, as the running average will use the ``src``'s device. Otherwise, defaults to CPU. Only applicable when the computed value from the metric is a tensor. diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py index 664ee35ffbd8..67f3941050c5 100644 --- a/ignite/metrics/ssim.py +++ b/ignite/metrics/ssim.py @@ -14,17 +14,17 @@ class SSIM(Metric): Computes Structual Similarity Index Measure Args: - data_range (int or float): Range of the image. Typically, ``1.0`` or ``255``. - kernel_size (int or list or tuple of int): Size of the kernel. Default: (11, 11) - sigma (float or list or tuple of float): Standard deviation of the gaussian kernel. + data_range: Range of the image. Typically, ``1.0`` or ``255``. + kernel_size: Size of the kernel. Default: (11, 11) + sigma: Standard deviation of the gaussian kernel. Argument is used if ``gaussian=True``. Default: (1.5, 1.5) - k1 (float): Parameter of SSIM. Default: 0.01 - k2 (float): Parameter of SSIM. Default: 0.03 - gaussian (bool): ``True`` to use gaussian kernel, ``False`` to use uniform kernel - output_transform (callable, optional): A callable that is used to transform the + k1: Parameter of SSIM. Default: 0.01 + k2: Parameter of SSIM. Default: 0.03 + gaussian: ``True`` to use gaussian kernel, ``False`` to use uniform kernel + output_transform: A callable that is used to transform the :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the form expected by the metric. - device (str or torch.device): specifies which device updates are accumulated on. Setting the metric's + device: specifies which device updates are accumulated on. 
Setting the metric's device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By default, CPU. diff --git a/ignite/metrics/top_k_categorical_accuracy.py b/ignite/metrics/top_k_categorical_accuracy.py index 20d948003ebc..b6bec2de3970 100644 --- a/ignite/metrics/top_k_categorical_accuracy.py +++ b/ignite/metrics/top_k_categorical_accuracy.py @@ -13,6 +13,17 @@ class TopKCategoricalAccuracy(Metric): Calculates the top-k categorical accuracy. - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. + + Args: + k: the k in “top-k”. + output_transform: a callable that is used to transform the + :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the + form expected by the metric. This can be useful if, for example, you have a multi-output model and + you want to compute the metric with respect to one of the outputs. + By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. + device: specifies which device updates are accumulated on. Setting the + metric's device to be the same as your ``update`` arguments ensures the ``update`` method is + non-blocking. By default, CPU. """ def __init__( From b82b625c8ec700c094123ba7221851c8a4a37bcf Mon Sep 17 00:00:00 2001 From: ydcjeff Date: Tue, 23 Feb 2021 21:28:41 +0630 Subject: [PATCH 2/3] fix: down sphinx=3.2.1, normalize --- docs/requirements.txt | 2 +- docs/source/conf.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index f7b4ab9b632e..4c94a44e1dc8 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,3 +1,3 @@ -sphinx==3.4 +sphinx==3.2.1 -e git+https://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme sphinxcontrib-katex diff --git a/docs/source/conf.py b/docs/source/conf.py index 2c8c5e660d30..6d76d5276165 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -206,6 +206,7 @@ # -- Type hints configs ------------------------------------------------------ +autodoc_inherit_docstrings = True autoclass_content = "both" autodoc_typehints = "description" napoleon_attr_annotations = True From 79b4fa1e72b5a199dfd87443ab6e9faa1efe334f Mon Sep 17 00:00:00 2001 From: ydcjeff Date: Tue, 23 Feb 2021 21:28:51 +0630 Subject: [PATCH 3/3] fix: down sphinx=3.2.1, normalize --- ignite/metrics/multilabel_confusion_matrix.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ignite/metrics/multilabel_confusion_matrix.py b/ignite/metrics/multilabel_confusion_matrix.py index 673a154624ef..bbdef7abfadb 100644 --- a/ignite/metrics/multilabel_confusion_matrix.py +++ b/ignite/metrics/multilabel_confusion_matrix.py @@ -38,7 +38,7 @@ class MultiLabelConfusionMatrix(Metric): device: specifies which device updates are accumulated on. Setting the metric's device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By default, CPU. - normalized: whether to normalized confusion matrix by its sum or not. + normalized: whether to normalize confusion matrix by its sum or not. .. versionadded:: 0.5.0
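
A note on the pattern, since every hunk above repeats it: the parenthesized type is dropped from each Google-style ``Args:`` entry and the type is taken from the signature annotation instead, which ``autodoc_typehints = "description"`` (set in ``docs/source/conf.py``) renders back into the parameter description; ``napoleon_attr_annotations = True`` does the same for annotated class attributes such as ``usage_name: str``. Below is a minimal sketch of the resulting convention, using a hypothetical ``MyMetric`` class rather than any file touched by the patches:

    from typing import Callable, Union

    import torch


    class MyMetric:
        """Hypothetical metric illustrating the updated docstring style.

        Args:
            output_transform: a callable that is used to transform the
                :class:`~ignite.engine.engine.Engine`'s ``process_function``'s
                output into the form expected by the metric.
            device: specifies which device updates are accumulated on.
                By default, CPU.
        """

        def __init__(
            self,
            output_transform: Callable = lambda x: x,
            device: Union[str, torch.device] = torch.device("cpu"),
        ) -> None:
            # With ``autodoc_typehints = "description"`` Sphinx reads these
            # annotations and shows them next to the Args entries above, so
            # the old ``output_transform (callable, optional):`` form would
            # only duplicate information.
            self.output_transform = output_transform
            self.device = torch.device(device) if isinstance(device, str) else device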