Readme update2 #1142

Merged · 10 commits · Mar 28, 2021
10 changes: 7 additions & 3 deletions CHANGELOG.md
@@ -5,7 +5,7 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).


## [XX.XX] - XXXX-XX-XX
## [21.03.1] - XXXX-XX-XX

### Added

@@ -21,6 +21,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
- Engine configuration through cmd ([#1134](https://github.com/catalyst-team/catalyst/issues/1134))
- Extra utils for thresholds ([#1134](https://github.com/catalyst-team/catalyst/issues/1134))
- Added gradient clipping function to optimizer callback ([1124](https://github.com/catalyst-team/catalyst/pull/1124))
- FactorizedLinear to contrib ([1142](https://github.com/catalyst-team/catalyst/pull/1142))
- Extra init params for ``ConsoleLogger`` ([1142](https://github.com/catalyst-team/catalyst/pull/1142))

### Changed

@@ -29,12 +31,14 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).

### Removed


- Config API deprecated parsing logic ([1142](https://github.com/catalyst-team/catalyst/pull/1142)) ([1138](https://github.com/catalyst-team/catalyst/pull/1138))

### Fixed

- Data-Model device sync and ``Engine`` logic during `runner.predict_loader` ([#1134](https://github.com/catalyst-team/catalyst/issues/1134))
- BatchLimitLoaderWrapper logic for loaders with shuffle flag ([#1136](https://github.com/catalyst-team/catalyst/issues/1136))
- config description in the examples ([1142](https://github.com/catalyst-team/catalyst/pull/1142))
- Config API deprecated parsing logic ([1142](https://github.com/catalyst-team/catalyst/pull/1142)) ([1138](https://github.com/catalyst-team/catalyst/pull/1138))
- RecSys metrics Top_k calculations ([#1140](https://github.com/catalyst-team/catalyst/pull/1140))

## [21.03] - 2021-03-13 ([#1095](https://github.com/catalyst-team/catalyst/issues/1095))
1 change: 1 addition & 0 deletions catalyst/contrib/nn/modules/__init__.py
@@ -12,6 +12,7 @@
)
from catalyst.contrib.nn.modules.cosface import CosFace, AdaCos
from catalyst.contrib.nn.modules.curricularface import CurricularFace
from catalyst.contrib.nn.modules.factorized import FactorizedLinear
from catalyst.contrib.nn.modules.lama import (
LamaPooling,
TemporalLastPooling,
52 changes: 52 additions & 0 deletions catalyst/contrib/nn/modules/factorized.py
@@ -0,0 +1,52 @@
from typing import Union

import torch
from torch import nn


class FactorizedLinear(nn.Module):

[pep8] reported by reviewdog 🐶
D101 Missing docstring in public class

"""Factorized wrapper for ``nn.Linear``

Args:
nn_linear: torch ``nn.Linear`` module
dim_ratio: dimension ration to use after weights SVD
"""

def __init__(self, nn_linear: nn.Linear, dim_ratio: Union[int, float] = 1.0):
super().__init__()
self.bias = nn.parameter.Parameter(nn_linear.bias.data, requires_grad=True)
u, vh = self._spectral_init(nn_linear.weight.data, dim_ratio=dim_ratio)
# print(f"Doing SVD of tensor {or_linear.weight.shape}, U: {u.shape}, Vh: {vh.shape}")
self.u = nn.parameter.Parameter(u, requires_grad=True)
self.vh = nn.parameter.Parameter(vh, requires_grad=True)
self.dim_ratio = dim_ratio
        # nn.Linear weight is (out_features, in_features): u is (out, k), vh is (k, in)
        self.in_features = vh.size(1)
        self.out_features = u.size(0)

@staticmethod
def _spectral_init(m, dim_ratio: Union[int, float] = 1):
u, s, vh = torch.linalg.svd(m, full_matrices=False)
u = u @ torch.diag(torch.sqrt(s))
vh = torch.diag(torch.sqrt(s)) @ vh
if dim_ratio < 1:
dims = int(u.size(1) * dim_ratio)
u = u[:, :dims]
vh = vh[:dims, :]
# s_share = s[:dims].sum() / s.sum() * 100
# print(f"SVD eigenvalue share {s_share:.2f}%")
return u, vh

def extra_repr(self) -> str:

[pep8] reported by reviewdog 🐶
D102 Missing docstring in public method

"""Extra representation log."""
return (
f"in_features={self.in_features}, "
f"out_features={self.out_features}, "
f"bias=True, dim_ratio={self.dim_ratio}"
)

def forward(self, x: torch.Tensor):
"""Forward call."""
return x @ (self.u @ self.vh).transpose(0, 1) + self.bias


__all__ = ["FactorizedLinear"]
2 changes: 1 addition & 1 deletion catalyst/engines/torch.py
@@ -336,7 +336,7 @@ def backward_loss(self, loss, model, optimizer) -> None:
def optimizer_step(self, loss, model, optimizer) -> None:
"""Abstraction over ``optimizer.step()`` step."""
optimizer.step()
dist.barrier()
# dist.barrier()


__all__ = ["DeviceEngine", "DataParallelEngine", "DistributedDataParallelEngine"]
30 changes: 21 additions & 9 deletions catalyst/loggers/console.py
@@ -8,7 +8,24 @@ def _format_metrics(dct: Dict):


class ConsoleLogger(ILogger):
"""Console logger for parameters and metrics. Used by default during all runs."""
"""Console logger for parameters and metrics. Used by default during all runs.

Args:
log_hparams: boolean flag to print all hparams to the console (default: False)
log_loader_metrics: boolean flag to print loader metrics to the console (default: True)
log_epoch_metrics: boolean flag to print epoch metrics to the console (default: True)
"""

def __init__(
self,
log_hparams: bool = False,
log_loader_metrics: bool = True,
log_epoch_metrics: bool = True,
):
super().__init__()
self._log_hparams = log_hparams
self._log_loader_metrics = log_loader_metrics
self._log_epoch_metrics = log_epoch_metrics

def log_metrics(
self,
@@ -33,16 +50,11 @@ def log_metrics(
loader_sample_step: int = 0,
) -> None:
"""Logs loader and epoch metrics to stdout."""
# if self.exclude is not None and scope in self.exclude:
# return
# elif (
# self.include is not None and scope in self.include
# ) or self.include is None:
if scope == "loader":
if scope == "loader" and self._log_loader_metrics:
prefix = f"{loader_key} ({stage_epoch_step}/{stage_epoch_len}) "
msg = prefix + _format_metrics(metrics)
print(msg)
elif scope == "epoch":
elif scope == "epoch" and self._log_epoch_metrics:
# @TODO: trick to save pure epoch-based metrics, like lr/momentum
prefix = f"* Epoch ({stage_epoch_step}/{stage_epoch_len}) "
msg = prefix + _format_metrics(metrics["_epoch_"])
@@ -64,7 +76,7 @@ def log_hparams(
run_key: Experiment info.
stage_key: Stage info.
"""
if scope == "experiment":
if scope == "experiment" and self._log_hparams:
print(f"Hparams ({run_key}): {hparams}")


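
A short sketch of how the new ``ConsoleLogger`` flags might be used (a hedged example, not from the PR; the runner wiring mentioned in the comment is assumed, not shown in this diff):

```python
from catalyst.loggers.console import ConsoleLogger

# Quieter console output: print hparams once at experiment start,
# skip the per-loader lines, keep the per-epoch summaries.
console_logger = ConsoleLogger(
    log_hparams=True,
    log_loader_metrics=False,
    log_epoch_metrics=True,
)

# Assumed wiring: pass the logger to a runner via its `loggers` dict,
# e.g. loggers={"console": console_logger}; the exact integration point
# depends on the runner being used.
loggers = {"console": console_logger}
```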
15 changes: 6 additions & 9 deletions catalyst/runners/config.py
@@ -240,7 +240,7 @@ def _get_optimizer_from_params(
self, model: RunnerModel, stage: str, **params
) -> RunnerOptimizer:
# @TODO 1: refactor; this method is too long

params = deepcopy(params)
# learning rate linear scaling
lr_scaling_params = params.pop("lr_linear_scaling", None)
if lr_scaling_params:
@@ -290,11 +290,6 @@ def get_optimizer(self, model: RunnerModel, stage: str) -> RunnerOptimizer:
if is_key_value:
optimizer = {}
for key, params in optimizer_params.items():
# load specified optimizer from checkpoint
optimizer_key = "_optimizer"
assert optimizer_key not in params, "keyword reserved"
params[optimizer_key] = key

optimizer[key] = self._get_optimizer_from_params(
model=model, stage=stage, **params
)
@@ -310,16 +305,18 @@ def _get_scheduler_from_params(*, optimizer: RunnerOptimizer, **params) -> RunnerScheduler:
params = deepcopy(params)

is_key_value = params.pop("_key_value", False)
optimizer_key = params.pop("_optimizer", None)
optimizer = optimizer[optimizer_key] if optimizer_key else optimizer

if is_key_value:
scheduler: Dict[str, Scheduler] = {}
for key, scheduler_params in params.items():
scheduler_params = deepcopy(scheduler_params)
                optimizer_key = scheduler_params.pop("_optimizer", None)
                key_optimizer = optimizer[optimizer_key] if optimizer_key else optimizer
                scheduler[key] = ConfigRunner._get_scheduler_from_params(
                    **scheduler_params, optimizer=key_optimizer
                )  # noqa: WPS437
else:
optimizer_key = params.pop("_optimizer", None)
optimizer = optimizer[optimizer_key] if optimizer_key else optimizer
scheduler = REGISTRY.get_from_params(**params, optimizer=optimizer)
return scheduler

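
To make the per-key ``_optimizer`` handling above easier to follow, here is a small standalone sketch of the lookup it performs (the config keys and optimizer names are illustrative, not taken from the PR):

```python
from copy import deepcopy

# Stand-ins for optimizers built from a key-value optimizer config.
optimizers = {"encoder_opt": "<encoder optimizer>", "head_opt": "<head optimizer>"}

# Key-value scheduler config: each entry may select its own optimizer
# through the reserved "_optimizer" key.
scheduler_config = {
    "_key_value": True,
    "encoder_scheduler": {"_optimizer": "encoder_opt", "gamma": 0.9},
    "head_scheduler": {"_optimizer": "head_opt", "gamma": 0.5},
}

params = deepcopy(scheduler_config)
if params.pop("_key_value", False):
    for key, scheduler_params in params.items():
        scheduler_params = deepcopy(scheduler_params)
        # Resolved inside the loop, mirroring the updated _get_scheduler_from_params:
        # each scheduler gets the optimizer named by its own "_optimizer" entry.
        optimizer_key = scheduler_params.pop("_optimizer", None)
        key_optimizer = optimizers[optimizer_key] if optimizer_key else optimizers
        print(key, "->", key_optimizer, scheduler_params)
```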
16 changes: 7 additions & 9 deletions catalyst/runners/hydra.py
@@ -228,7 +228,7 @@ def _get_criterion_from_params(params: DictConfig) -> RunnerCriterion:
return criterion

def get_criterion(self, stage: str) -> RunnerCriterion:
"""Returns the criterions for a given stage."""
"""Returns the criterion for a given stage."""
if "criterion" not in self._config.stages[stage]:
return None
criterion_params: DictConfig = self._config.stages[stage].criterion
@@ -284,16 +284,12 @@ def get_optimizer(self, model: RunnerModel, stage: str) -> RunnerOptimizer:
return None

optimizer_params: DictConfig = self._config.stages[stage].optimizer
optimizer_params = deepcopy(optimizer_params)
is_key_value = optimizer_params._key_value or False

if is_key_value:
optimizer = {}
for key, params in optimizer_params.items():
# load specified optimizer from checkpoint
optimizer_key = "_optimizer"
assert optimizer_key not in params, "keyword reserved"
params[optimizer_key] = key

optimizer[key] = self._get_optimizer_from_params(
model=model, stage=stage, params=params
)
@@ -310,16 +306,18 @@ def _get_scheduler_from_params(
) -> RunnerScheduler:
params = deepcopy(params)
is_key_value = params._key_value or False
optimizer_key = params._optimizer or None
optimizer = optimizer[optimizer_key] if optimizer_key else optimizer

if is_key_value:
scheduler: Dict[str, Scheduler] = {}
for key, scheduler_params in params.items():
scheduler_params = deepcopy(scheduler_params)
                optimizer_key = scheduler_params._optimizer or None
                key_optimizer = optimizer[optimizer_key] if optimizer_key else optimizer
                scheduler[key] = HydraRunner._get_scheduler_from_params(  # noqa: WPS437
                    optimizer=key_optimizer, params=scheduler_params
                )
else:
optimizer_key = params._optimizer or None
optimizer = optimizer[optimizer_key] if optimizer_key else optimizer
scheduler = hydra.utils.instantiate(params, optimizer=optimizer)

return scheduler