
Fix codestyle issues #1331

Merged
merged 14 commits on Oct 19, 2021
10 changes: 10 additions & 0 deletions catalyst/callbacks/checkpoint.py
@@ -158,6 +158,7 @@ def _load_states_from_file_map(
from files specified in ``load_map``.

Arguments:
logdir: directory with logs
runner: current runner
load_map (Dict[str, str]): dict with mappings to load.
Expected keys - ``'model'``, ``'criterion'``
@@ -485,6 +486,15 @@ def _save_checkpoint(
"""
Saves checkpoints: a full one with model/criterion/optimizer/scheduler states,
and a truncated one with the model only.

Args:
runner: current runner.
checkpoint: data to save.
is_best: if ``True``, a best checkpoint file is also generated.
is_last: if ``True``, a last checkpoint file is also generated.

Returns:
path to saved checkpoint
"""
logdir = Path(f"{self.logdir}/")
suffix = f"{runner.stage_key}.{runner.stage_epoch_step}"
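For orientation, a minimal sketch of the save pattern this docstring describes, assuming a dict-style checkpoint with a ``model_state_dict`` key; the helper name and filename layout are illustrative, not Catalyst's actual implementation:

    from pathlib import Path

    import torch

    def save_checkpoint_sketch(checkpoint: dict, logdir: str, suffix: str) -> Path:
        # full checkpoint: model/criterion/optimizer/scheduler states
        logdir = Path(logdir)
        full_path = logdir / f"{suffix}_full.pth"
        torch.save(checkpoint, full_path)
        # truncated checkpoint: model weights only
        torch.save({"model_state_dict": checkpoint["model_state_dict"]}, logdir / f"{suffix}.pth")
        return full_path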
3 changes: 3 additions & 0 deletions catalyst/callbacks/scheduler.py
@@ -368,6 +368,9 @@ def __init__(
scale: learning rate increasing scale ("log" or "linear")
num_steps: number of batches to try; if ``None``, the whole loader is used
optimizer_key: which optimizer key to use for learning rate scheduling

Raises:
NotImplementedError: if the scale value is invalid.
"""
super().__init__(optimizer_key=optimizer_key)

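The scale argument is easiest to see with a worked sketch; the function below is an assumed illustration (the names ``lr_start`` and ``lr_end`` are not from this PR) of how the tried learning rates would be spaced under each mode:

    import numpy as np

    def lr_range(lr_start: float, lr_end: float, num_steps: int, scale: str) -> np.ndarray:
        if scale == "log":
            # e.g. 1e-6, 1e-5, ..., 1e-1: equal ratios between consecutive steps
            return np.logspace(np.log10(lr_start), np.log10(lr_end), num_steps)
        if scale == "linear":
            # equal increments between consecutive steps
            return np.linspace(lr_start, lr_end, num_steps)
        raise NotImplementedError(f"Unsupported scale: {scale}")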
11 changes: 7 additions & 4 deletions catalyst/callbacks/soft_update.py
@@ -24,14 +24,17 @@ def __init__(
"""Init.

Args:
target_model_key: key to the data inside `runner.model` to update
source_model_key: key to the source data inside `runner.model`
tau: smoothing parameter `target * (1.0 - tau) + source * tau`
scope (str): when the `target` should be updated
``"on_batch_end"``
``"on_batch_start"``
``"on_epoch_end"``
``"on_epoch_start"``

Raises:
TypeError: if the scope is invalid
"""
super().__init__(order=CallbackOrder.Metric)
self.target_model_key = target_model_key
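The ``tau`` formula above amounts to Polyak averaging; a minimal standalone sketch of one update step (not the callback's actual code):

    import torch

    @torch.no_grad()
    def soft_update(target: torch.nn.Module, source: torch.nn.Module, tau: float) -> None:
        # target = target * (1 - tau) + source * tau, parameter by parameter
        for t_param, s_param in zip(target.parameters(), source.parameters()):
            t_param.data.mul_(1.0 - tau).add_(s_param.data, alpha=tau)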
6 changes: 4 additions & 2 deletions catalyst/contrib/data/reader.py
@@ -35,7 +35,10 @@ def __call__(self, element):
element: elem in your dataset

Returns:
Data object used for your neural network
Data object used for your neural network # noqa: DAR202

Raises:
NotImplementedError: you should implement it
"""
raise NotImplementedError("You cannot apply a transformation using `BaseReader`")

@@ -145,7 +148,6 @@ def __init__(self, transforms: List[IReader]):
"""
Args:
transforms: list of readers to compose
mixins: list of mixins to use
"""
self.transforms = transforms

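Since ``BaseReader.__call__`` is abstract, a subclass supplies the actual transformation; a hypothetical reader (not one of Catalyst's bundled readers) might look like:

    class ExampleReader:
        # Hypothetical reader in the spirit of ``BaseReader``: turns one key
        # of a dataset element into data for the network.
        def __init__(self, input_key: str, output_key: str):
            self.input_key = input_key
            self.output_key = output_key

        def __call__(self, element):
            # e.g. load a file, decode an image, or cast a label here
            return {self.output_key: element[self.input_key]}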
3 changes: 3 additions & 0 deletions catalyst/contrib/datasets/cv/imagecar.py
@@ -69,6 +69,9 @@ def __init__(
transforms (callable, optional): A function/transform that
takes in an image and returns a transformed version.

Raises:
RuntimeError: if ``download`` is ``False`` and the dataset is not found.

Examples:
>>> from catalyst.contrib.datasets import CarvanaOneCarDataset
>>> dataset = CarvanaOneCarDataset(root='./',
2 changes: 1 addition & 1 deletion catalyst/contrib/datasets/cv/misc.py
@@ -55,7 +55,7 @@ def __init__(self, root: str, train: bool = True, download: bool = False, **kwargs):
download: if ``True``, downloads the dataset from
the internet and puts it in root directory. If dataset
is already downloaded, it is not downloaded again
**kwargs:
**kwargs: keyword arguments passed to the ``super().__init__`` method.
"""
# download dataset if needed
if download and not os.path.exists(os.path.join(root, self.name)):
Expand Down
6 changes: 6 additions & 0 deletions catalyst/contrib/datasets/mnist.py
@@ -82,6 +82,9 @@ def __init__(self, root, train=True, transform=None, target_transform=None, download=False):
takes in an image and returns a transformed version.
target_transform (callable, optional): A function/transform
that takes in the target and transforms it.

Raises:
RuntimeError: if ``download`` is ``False`` and the dataset is not found.
"""
if isinstance(root, torch._six.string_classes):
root = os.path.expanduser(root)
@@ -215,6 +218,9 @@ class MnistMLDataset(MetricLearningTrainDataset, MNIST):

def __init__(self, **kwargs):
"""
Args:
**kwargs: keyword arguments passed to the ``super().__init__`` method.

Raises:
ValueError: if train argument is False (MnistMLDataset
should be used only for training)
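A sketch of the guard the ``Raises`` section describes, with an assumed ``train`` keyword (the real class forwards ``**kwargs`` to ``MNIST.__init__``):

    class TrainOnlyDatasetSketch:
        def __init__(self, **kwargs):
            # MnistMLDataset should be used only for training
            if not kwargs.get("train", True):
                raise ValueError("This dataset should be used only for training")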
5 changes: 4 additions & 1 deletion catalyst/contrib/datasets/movielens.py
@@ -71,6 +71,9 @@ def __init__(self, root, train=True, download=False, min_rating=0.0):
is already downloaded, it is not downloaded again.
min_rating (float, optional): Minimum rating to include in
the interaction matrix

Raises:
RuntimeError: if ``download`` is ``False`` and the dataset is not found.
"""
if isinstance(root, torch._six.string_classes):
root = os.path.expanduser(root)
@@ -196,7 +199,7 @@ def _parse(self, data):
Args:
data: raw data of the dataset

Returns:
Yields:
Generator iterator for parsed data
"""
for line in data:
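The ``Yields`` heading fits because ``_parse`` is a generator; an assumed sketch, using the tab-separated ``user item rating timestamp`` layout of the raw MovieLens files:

    def parse_sketch(lines):
        for line in lines:
            user, item, rating, timestamp = line.strip().split("\t")
            # one interaction per raw line
            yield int(user), int(item), float(rating), int(timestamp)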
1 change: 1 addition & 0 deletions catalyst/contrib/models/mnist.py
@@ -12,6 +12,7 @@ def __init__(self, out_features: int, normalize: bool = True):
"""
Args:
out_features: size of the output tensor
normalize: boolean flag to add a normalization layer
"""
super().__init__()
layers = [
2 changes: 2 additions & 0 deletions catalyst/contrib/nn/criterion/supervised_contrastive.py
@@ -27,8 +27,10 @@ def __init__(self, tau: float, reduction: str = "mean", pos_aggregation="in") ->
``"in"`` | ``"out"``.
``"in"``: maximization of log(average positive exponentiate similarity)
``"out"``: maximization of average positive similarity

Raises:
ValueError: if reduction is not ``"mean"``, ``"sum"`` or ``"none"``
ValueError: if positive aggregation is not ``"in"`` or ``"out"``
"""
super().__init__()
self.tau = tau
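A minimal sketch of the two argument checks listed under ``Raises`` (an assumed helper, not the criterion's actual code):

    def validate_args(reduction: str, pos_aggregation: str) -> None:
        if reduction not in ("mean", "sum", "none"):
            raise ValueError(f"Unsupported reduction: {reduction}")
        if pos_aggregation not in ("in", "out"):
            raise ValueError(f"Unsupported positive aggregation: {pos_aggregation}")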
3 changes: 3 additions & 0 deletions catalyst/contrib/nn/optimizers/lamb.py
@@ -51,6 +51,9 @@ def __init__(
(default: 0)
adam (bool, optional): always use trust ratio = 1, which turns
this into Adam. Useful for comparison purposes.

Raises:
ValueError: if the learning rate, epsilon value, or betas are invalid.
"""
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
4 changes: 4 additions & 0 deletions catalyst/contrib/nn/optimizers/qhadamw.py
@@ -51,6 +51,10 @@ def __init__(
weight_decay (float, optional): weight decay
(L2 regularization coefficient, times two)
(default: 0.0)

Raises:
ValueError: if the learning rate, epsilon value, betas, or
weight_decay value is invalid.
"""
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
3 changes: 3 additions & 0 deletions catalyst/data/dataset/metric_learning.py
@@ -39,6 +39,9 @@ def __getitem__(self, item) -> Dict[str, torch.Tensor]:
be boolean and indicate whether current object
is in query or in gallery.

Args:
item: item index in the dataset

Raises:
NotImplementedError: You should implement it # noqa: DAR402
"""
3 changes: 3 additions & 0 deletions catalyst/data/dataset/self_supervised.py
@@ -64,6 +64,9 @@ def __init__(
transform_original: transforms that will be applied to save the original sample in the batch
is_target: flag that selects whether the dataset returns ``(sample, target)`` or only the sample

Raises:
ValueError: ``transform_left`` and ``transform_right`` should be
specified simultaneously, or only ``transforms`` should be given
"""
super().__init__()

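The mutual-exclusion rule in ``Raises`` can be sketched as two checks (an assumed helper, not the dataset's actual code):

    def check_transforms(transforms, transform_left, transform_right) -> None:
        # both sided transforms must be given together
        if (transform_left is None) != (transform_right is None):
            raise ValueError("Specify transform_left and transform_right simultaneously")
        # sided transforms and plain transforms are mutually exclusive
        if (transform_left is not None) == (transforms is not None):
            raise ValueError("Specify either transform_left/transform_right or only transforms")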
3 changes: 3 additions & 0 deletions catalyst/data/loader.py
@@ -131,6 +131,9 @@ def __next__(self):

Returns:
next batch

Raises:
StopIteration: if iteration_index >= len(origin)
"""
if self.iteration_index >= len(self.origin):
raise StopIteration()
14 changes: 7 additions & 7 deletions catalyst/data/sampler.py
@@ -206,8 +206,8 @@ def __init__(self, labels: List[int], mode: Union[str, int] = "downsampling"):

def __iter__(self) -> Iterator[int]:
"""
Yields:
indices of stratified sample
Returns:
iterator of indices of stratified sample
"""
indices = []
for key in sorted(self.lbl2idx):
@@ -487,8 +487,8 @@ def _exp_scheduler(self) -> float:

def __iter__(self) -> Iterator[int]:
"""
Yields:
indices of stratified sample
Returns:
iterator of indices of stratified sample
"""
indices = []
for key in sorted(self.label2idxes):
@@ -708,11 +708,11 @@ def __init__(
Args:
sampler: Sampler used for subsampling
num_replicas (int, optional): Number of processes participating in
distributed training
rank (int, optional): Rank of the current process
within ``num_replicas``
shuffle (bool, optional): If true (default),
sampler will shuffle the indices
"""
super(DistributedSamplerWrapper, self).__init__(
DatasetFromSampler(sampler), num_replicas=num_replicas, rank=rank, shuffle=shuffle
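``DistributedSamplerWrapper`` is useful because PyTorch's own ``DistributedSampler`` takes a dataset, not another sampler; an assumed usage sketch for a single process (rank 0 of 2):

    import torch
    from torch.utils.data import DataLoader, TensorDataset, WeightedRandomSampler

    from catalyst.data.sampler import DistributedSamplerWrapper

    dataset = TensorDataset(torch.arange(100).float())
    sampler = WeightedRandomSampler(weights=torch.rand(100), num_samples=100)
    # each DDP process then sees its own shard of the sampled indices
    distributed_sampler = DistributedSamplerWrapper(sampler, num_replicas=2, rank=0)
    loader = DataLoader(dataset, sampler=distributed_sampler, batch_size=32)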
3 changes: 2 additions & 1 deletion catalyst/data/sampler_inbatch.py
@@ -138,7 +138,8 @@ def _sample(self, *_: Tensor, labels: List[int]) -> TTripletsIds:
labels: labels of the samples in the batch
*_: note that we ignore the features argument

Returns: indices of triplets
Returns:
indices of triplets
"""
num_labels = len(labels)

10 changes: 9 additions & 1 deletion catalyst/loggers/mlflow.py
@@ -3,6 +3,8 @@

import numpy as np

from mlflow.tracking.fluent import ActiveRun

from catalyst.core.logger import ILogger
from catalyst.settings import SETTINGS
from catalyst.typing import Directory, File, Union
@@ -11,9 +13,15 @@
import mlflow


def _get_or_start_run(run_name):
def _get_or_start_run(run_name: Optional[str]) -> ActiveRun:
"""The function of MLflow. Gets the active run and gives it a name.
If active run does not exist, starts a new one.

Args:
run_name: Name of the run

Returns:
ActiveRun
"""
active_run = mlflow.active_run()
if active_run:
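The collapsed branch bodies follow the usual MLflow pattern; a sketch under the assumption that renaming uses the standard ``mlflow.runName`` tag (this is the stock MLflow mechanism, not necessarily this PR's exact code):

    import mlflow

    def get_or_start_run_sketch(run_name=None):
        active_run = mlflow.active_run()
        if active_run is not None:
            if run_name is not None:
                mlflow.set_tag("mlflow.runName", run_name)  # rename the active run
            return active_run
        return mlflow.start_run(run_name=run_name)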
9 changes: 4 additions & 5 deletions catalyst/metrics/_classification.py
@@ -35,6 +35,9 @@ class StatisticsMetric(ICallbackBatchMetric):
prefix: metric prefix
suffix: metric suffix

Raises:
ValueError: if mode is incorrect

Examples:

.. code-block:: python
@@ -100,11 +103,7 @@ def __init__(
prefix: Optional[str] = None,
suffix: Optional[str] = None,
):
"""Init params

Raises:
ValueError: if mode is incorrect
"""
"""Init params"""
super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix)
if mode == "binary":
self.statistics_fn = get_binary_statistics
4 changes: 4 additions & 0 deletions catalyst/metrics/_metric.py
@@ -57,6 +57,10 @@ def __call__(self, *args, **kwargs) -> Any:
(`on_batch_end` event).
Returns computed value if `compute_on_call=True`.

Args:
*args: Arguments passed to update method.
**kwargs: Keyword-arguments passed to update method.

Returns:
Any: computed value (returning a key-value mapping is preferred).
"""
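A hypothetical metric illustrating the ``__call__`` contract documented above: ``update`` consumes the batch arguments, and the computed value is returned when ``compute_on_call`` is enabled:

    class MeanMetricSketch:
        def __init__(self, compute_on_call: bool = True):
            self.compute_on_call = compute_on_call
            self.total, self.count = 0.0, 0

        def update(self, value: float) -> None:
            self.total += value
            self.count += 1

        def compute(self) -> float:
            return self.total / max(self.count, 1)

        def __call__(self, *args, **kwargs):
            # forward everything to update, as the docstring describes
            self.update(*args, **kwargs)
            return self.compute() if self.compute_on_call else None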