Merge branch 'master' into add-colab-badges
tchaton committed Dec 14, 2020
2 parents 3b7875e + 0327f6b commit 320c1bb
Showing 6 changed files with 67 additions and 17 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -45,6 +45,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
- Fixed `LightningOptimizer` exposes optimizer attributes ([#5095](https://github.com/PyTorchLightning/pytorch-lightning/pull/5095))


- Do not warn when the `name` key is used in the `lr_scheduler` dict ([#5057](https://github.com/PyTorchLightning/pytorch-lightning/pull/5057))


## [1.1.0] - 2020-12-09

2 changes: 1 addition & 1 deletion pytorch_lightning/callbacks/lr_monitor.py
@@ -157,7 +157,7 @@ def _find_names(self, lr_schedulers) -> List[str]:
        names = []
        for scheduler in lr_schedulers:
            sch = scheduler['scheduler']
-           if 'name' in scheduler:
+           if scheduler['name'] is not None:
                name = scheduler['name']
            else:
                opt_name = 'lr-' + sch.optimizer.__class__.__name__
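For orientation: when no custom name is given, the monitor falls back to a name derived from the optimizer class ('lr-Adam', with '-1', '-2', ... suffixes for duplicate names and '/pg1', '/pg2' keys for multiple parameter groups, as the tests further down show). A rough sketch of that fallback, not the library's exact implementation:

# Sketch only: approximates the fallback naming exercised by the tests below.
def default_scheduler_name(scheduler_config, existing_names):
    if scheduler_config.get('name') is not None:
        return scheduler_config['name']
    base = 'lr-' + scheduler_config['scheduler'].optimizer.__class__.__name__
    # Duplicate optimizer classes get a numeric suffix: 'lr-Adam', 'lr-Adam-1', ...
    duplicates = sum(1 for n in existing_names if n == base or n.startswith(base + '-'))
    return f'{base}-{duplicates}' if duplicates else base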
13 changes: 7 additions & 6 deletions pytorch_lightning/core/lightning.py
@@ -990,7 +990,7 @@ def configure_optimizers(
            - List or Tuple - List of optimizers.
            - Two lists - The first list has multiple optimizers, the second a list of LR schedulers (or lr_dict).
            - Dictionary, with an 'optimizer' key, and (optionally) a 'lr_scheduler'
-             key which value is a single LR scheduler or lr_dict.
+             key whose value is a single LR scheduler or lr_dict.
            - Tuple of dictionaries as described, with an optional 'frequency' key.
            - None - Fit will run without any optimizer.
@@ -1002,21 +1002,22 @@ def configure_optimizers(
        In the former case, all optimizers will operate on the given batch in each optimization step.
        In the latter, only one optimizer will operate on the given batch at every step.

-       The lr_dict is a dictionary which contains scheduler and its associated configuration.
-       It has five keys. The default configuration is shown below.
+       The lr_dict is a dictionary which contains the scheduler and its associated configuration.
+       The default configuration is shown below.

        .. code-block:: python

            {
-               'scheduler': lr_scheduler, # The LR schduler
+               'scheduler': lr_scheduler, # The LR scheduler instance (required)
                'interval': 'epoch', # The unit of the scheduler's step size
                'frequency': 1, # The frequency of the scheduler
                'reduce_on_plateau': False, # For ReduceLROnPlateau scheduler
                'monitor': 'val_loss', # Metric for ReduceLROnPlateau to monitor
-               'strict': True # Whether to crash the training if `monitor` is not found
+               'strict': True, # Whether to crash the training if `monitor` is not found
+               'name': None, # Custom name for LearningRateMonitor to use
            }

-       If user only provides LR schedulers, then their configuration will set to default as shown above.
+       Only the ``scheduler`` key is required, the rest will be set to the defaults above.

        Examples:
            .. code-block:: python
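To make the new 'name' key concrete, here is a minimal sketch of a configure_optimizers that attaches a custom name to its scheduler so LearningRateMonitor logs it under that key instead of the default 'lr-Adam'. The model, learning rate, and scheduler choice are placeholders, not part of this commit:

import torch
from pytorch_lightning import LightningModule

class LitModel(LightningModule):  # placeholder module for illustration
    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
        lr_dict = {
            'scheduler': torch.optim.lr_scheduler.StepLR(optimizer, step_size=10),
            'interval': 'epoch',  # remaining keys fall back to the defaults above
            'name': 'my_lr',      # logged by LearningRateMonitor as 'my_lr'
        }
        return [optimizer], [lr_dict]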
1 change: 1 addition & 0 deletions pytorch_lightning/trainer/optimizers.py
@@ -94,6 +94,7 @@ def configure_schedulers(self, schedulers: list, monitor: Optional[str] = None):
        lr_schedulers = []
        default_config = {
            'scheduler': None,
+           'name': None, # no custom name
            'interval': 'epoch', # after epoch is over
            'frequency': 1, # every epoch/batch
            'reduce_on_plateau': False, # most often not ReduceLROnPlateau scheduler
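Roughly, with 'name' now part of default_config, a bare scheduler returned from configure_optimizers is normalized to the fully populated dict sketched below; the keys and values mirror the assertions in tests/trainer/test_optimizers.py further down, while the model and optimizer here are placeholders:

import torch

model = torch.nn.Linear(4, 2)                     # stand-in model
optimizer = torch.optim.Adam(model.parameters())
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)

# What the trainer effectively builds for a bare scheduler (per the defaults above):
normalized = {
    'scheduler': scheduler,
    'name': None,                # added by this change: no custom name by default
    'interval': 'epoch',
    'frequency': 1,
    'reduce_on_plateau': False,
    'monitor': None,
    'strict': True,
}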
32 changes: 27 additions & 5 deletions tests/callbacks/test_lr_monitor.py
@@ -13,11 +13,11 @@
# limitations under the License.
import pytest

-import tests.base.develop_utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.utilities.exceptions import MisconfigurationException
-from tests.base import EvalModelTemplate
+import tests.base.develop_utils as tutils
+from tests.base import BoringModel, EvalModelTemplate


def test_lr_monitor_single_lr(tmpdir):
@@ -43,7 +43,7 @@ def test_lr_monitor_single_lr(tmpdir):
        'Momentum should not be logged by default'
    assert len(lr_monitor.lrs) == len(trainer.lr_schedulers), \
        'Number of learning rates logged does not match number of lr schedulers'
-   assert all([k in ['lr-Adam'] for k in lr_monitor.lrs.keys()]), \
+   assert lr_monitor.lr_sch_names == list(lr_monitor.lrs.keys()) == ['lr-Adam'], \
        'Names of learning rates not set correctly'


@@ -134,7 +134,7 @@ def test_lr_monitor_multi_lrs(tmpdir, logging_interval):
    assert lr_monitor.lrs, 'No learning rates logged'
    assert len(lr_monitor.lrs) == len(trainer.lr_schedulers), \
        'Number of learning rates logged does not match number of lr schedulers'
-   assert all([k in ['lr-Adam', 'lr-Adam-1'] for k in lr_monitor.lrs.keys()]), \
+   assert lr_monitor.lr_sch_names == ['lr-Adam', 'lr-Adam-1'], \
        'Names of learning rates not set correctly'

    if logging_interval == 'step':
@@ -167,5 +167,27 @@ def test_lr_monitor_param_groups(tmpdir):
    assert lr_monitor.lrs, 'No learning rates logged'
    assert len(lr_monitor.lrs) == 2 * len(trainer.lr_schedulers), \
        'Number of learning rates logged does not match number of param groups'
-   assert all([k in ['lr-Adam/pg1', 'lr-Adam/pg2'] for k in lr_monitor.lrs.keys()]), \
+   assert lr_monitor.lr_sch_names == ['lr-Adam']
+   assert list(lr_monitor.lrs.keys()) == ['lr-Adam/pg1', 'lr-Adam/pg2'], \
        'Names of learning rates not set correctly'


def test_lr_monitor_custom_name(tmpdir):
    class TestModel(BoringModel):
        def configure_optimizers(self):
            optimizer, [scheduler] = super().configure_optimizers()
            lr_scheduler = {'scheduler': scheduler, 'name': 'my_logging_name'}
            return optimizer, [lr_scheduler]

    lr_monitor = LearningRateMonitor()
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=2,
        limit_val_batches=0.1,
        limit_train_batches=0.5,
        callbacks=[lr_monitor],
        progress_bar_refresh_rate=0,
        weights_summary=None,
    )
    trainer.fit(TestModel())
    assert lr_monitor.lr_sch_names == list(lr_monitor.lrs.keys()) == ['my_logging_name']
34 changes: 29 additions & 5 deletions tests/trainer/test_optimizers.py
@@ -15,7 +15,6 @@
import torch

from pytorch_lightning import Callback, Trainer
-from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.base import EvalModelTemplate
from tests.base.boring_model import BoringModel
@@ -177,6 +176,7 @@ def test_reducelronplateau_scheduling(tmpdir):
        frequency=1,
        reduce_on_plateau=True,
        strict=True,
+       name=None,
    ), 'lr scheduler was not correctly converted to dict'


@@ -215,7 +215,13 @@ def test_optimizer_return_options(enable_pl_optimizer):
    assert len(freq) == 0
    assert optim[0] == opt_a
    assert lr_sched[0] == dict(
-       scheduler=scheduler_a, interval='epoch', frequency=1, reduce_on_plateau=False, monitor=None, strict=True
+       scheduler=scheduler_a,
+       interval='epoch',
+       frequency=1,
+       reduce_on_plateau=False,
+       monitor=None,
+       strict=True,
+       name=None,
    )

    # opt tuple of 1 list
@@ -225,7 +231,13 @@ def test_optimizer_return_options(enable_pl_optimizer):
    assert len(freq) == 0
    assert optim[0] == opt_a
    assert lr_sched[0] == dict(
-       scheduler=scheduler_a, interval='epoch', frequency=1, reduce_on_plateau=False, monitor=None, strict=True
+       scheduler=scheduler_a,
+       interval='epoch',
+       frequency=1,
+       reduce_on_plateau=False,
+       monitor=None,
+       strict=True,
+       name=None,
    )

    # opt single dictionary
@@ -235,7 +247,13 @@ def test_optimizer_return_options(enable_pl_optimizer):
    assert len(freq) == 0
    assert optim[0] == opt_a
    assert lr_sched[0] == dict(
-       scheduler=scheduler_a, interval='epoch', frequency=1, reduce_on_plateau=False, monitor=None, strict=True
+       scheduler=scheduler_a,
+       interval='epoch',
+       frequency=1,
+       reduce_on_plateau=False,
+       monitor=None,
+       strict=True,
+       name=None,
    )

    # opt multiple dictionaries with frequencies
@@ -247,7 +265,13 @@ def test_optimizer_return_options(enable_pl_optimizer):
    assert len(optim) == len(lr_sched) == len(freq) == 2
    assert optim[0] == opt_a
    assert lr_sched[0] == dict(
-       scheduler=scheduler_a, interval='epoch', frequency=1, reduce_on_plateau=False, monitor=None, strict=True
+       scheduler=scheduler_a,
+       interval='epoch',
+       frequency=1,
+       reduce_on_plateau=False,
+       monitor=None,
+       strict=True,
+       name=None,
    )
    assert freq == [1, 5]

