Merge pull request #45 from shan18/development
Add cyclic LR scheduler support
shan18 committed Oct 29, 2021
2 parents 1349082 + 332d65a commit cc317c4
Showing 4 changed files with 55 additions and 4 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -2,7 +2,7 @@
<img src="https://raw.githubusercontent.com/shan18/TensorNet/master/images/tensornet_logo.png" alt="tensornet" />
<br />
<br />
<img src="https://img.shields.io/badge/version-1.2.1-blue.svg" alt="Version">
<img src="https://img.shields.io/badge/version-1.2.2-blue.svg" alt="Version">
<a href='https://tensornet.readthedocs.io/en/latest/?badge=latest'><img src='https://readthedocs.org/projects/tensornet/badge/?version=latest' alt='Documentation Status' /></a>
<a href="https://github.com/shan18/TensorNet/blob/master/LICENSE"><img src="https://img.shields.io/apm/l/atomic-design-ui.svg?" alt="MIT License"></a>
<br />
2 changes: 1 addition & 1 deletion tensornet/__init__.py
@@ -1,2 +1,2 @@

-__version__ = '1.2.1'
+__version__ = '1.2.2'
9 changes: 8 additions & 1 deletion tensornet/engine/learner.py
@@ -58,6 +58,7 @@ def __init__(
'step_lr': None,
'lr_plateau': None,
'one_cycle_policy': None,
'cyclic_lr': None,
}
self.checkpoint = None
self.summary_writer = None
@@ -101,6 +102,8 @@ def _setup_callbacks(self, callbacks):
self.checkpoint = callback
elif isinstance(callback, TensorBoard):
self.summary_writer = callback
elif isinstance(callback, torch.optim.lr_scheduler.CyclicLR):
self.lr_schedulers['cyclic_lr'] = callback

def set_model(self, model):
"""Assign model to learner.
@@ -403,9 +406,13 @@ def train_batch(self, data):
self._calculate_metrics(targets, y_pred)

# One Cycle Policy for learning rate
-if not self.lr_schedulers['one_cycle_policy'] is None:
+if self.lr_schedulers['one_cycle_policy'] is not None:
self.lr_schedulers['one_cycle_policy'].step()

# Cyclic LR policy
if self.lr_schedulers['cyclic_lr'] is not None:
self.lr_schedulers['cyclic_lr'].step()

return loss.item()

def train_epoch(self, verbose=True):
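For context, here is a minimal runnable sketch of the dispatch the learner.py changes add: a CyclicLR instance passed in through the callbacks is stored under lr_schedulers['cyclic_lr'] and then stepped once per batch in train_batch. The standalone dictionary and loop below only mirror the Learner logic for illustration; the model and optimizer are placeholders.

import torch
from torch.optim.lr_scheduler import CyclicLR

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)

# Mirrors Learner.__init__: one slot per supported scheduler type.
lr_schedulers = {'step_lr': None, 'lr_plateau': None, 'one_cycle_policy': None, 'cyclic_lr': None}

# Mirrors Learner._setup_callbacks: route a CyclicLR callback into its slot.
callbacks = [CyclicLR(optimizer, base_lr=1e-4, max_lr=1e-2, step_size_up=100)]
for callback in callbacks:
    if isinstance(callback, torch.optim.lr_scheduler.CyclicLR):
        lr_schedulers['cyclic_lr'] = callback

# Mirrors Learner.train_batch: the scheduler is stepped after every batch.
if lr_schedulers['cyclic_lr'] is not None:
    lr_schedulers['cyclic_lr'].step()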
46 changes: 45 additions & 1 deletion tensornet/engine/ops/lr_scheduler.py
@@ -1,4 +1,4 @@
-from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, OneCycleLR
+from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, OneCycleLR, CyclicLR


def step_lr(optimizer, step_size, gamma=0.1, last_epoch=-1):
@@ -67,3 +67,47 @@ def one_cycle_lr(
optimizer, max_lr, epochs=epochs, steps_per_epoch=steps_per_epoch,
pct_start=pct_start, div_factor=div_factor, final_div_factor=final_div_factor
)


def cyclic_lr(
optimizer, base_lr, max_lr, step_size_up=2000, step_size_down=None, mode='triangular', gamma=1.0,
scale_fn=None, scale_mode='cycle', cycle_momentum=True, base_momentum=0.8, max_momentum=0.9,
last_epoch=-1, verbose=False
):
"""Create Cyclic LR Policy.
Args:
optimizer (torch.optim): Model optimizer.
base_lr (float): Lower learning rate boundary in the cycle.
max_lr (float): Upper learning rate boundary in the cycle.
step_size_up (int): Number of training iterations in the increasing half of a cycle.
(default: 2000)
step_size_down (int): Number of training iterations in the decreasing half of a cycle.
If step_size_down is None, it is set to step_size_up. (default: None)
mode (str): One of `triangular`, `triangular2`, `exp_range`. If scale_fn is not None,
this argument is ignored. (default: 'triangular')
gamma (float): Constant in 'exp_range' scaling function: gamma**(cycle iterations).
(default: 1.0)
scale_fn: Custom scaling policy defined by a single argument lambda function, where
0 <= scale_fn(x) <= 1 for all x >= 0. If specified, then 'mode' is ignored.
(default: None)
scale_mode (str): 'cycle', 'iterations'. Defines whether scale_fn is evaluated on cycle
number or cycle iterations (training iterations since start of cycle).
(default: 'cycle')
cycle_momentum (bool): If True, momentum is cycled inversely to learning rate between
'base_momentum' and 'max_momentum'. (default: True)
base_momentum (float): Lower momentum boundary in the cycle. (default: 0.8)
max_momentum (float): Upper momentum boundary in the cycle. Functionally, it defines
the cycle amplitude (max_momentum - base_momentum). (default: 0.9)
last_epoch (int): The index of the last batch. This parameter is used when resuming a
training job. (default: -1)
verbose (bool): If True, prints a message to stdout for each update. (default: False)
Returns:
CyclicLR instance.
"""
return CyclicLR(
optimizer, base_lr, max_lr, step_size_up=step_size_up, step_size_down=step_size_down,
mode=mode, gamma=gamma, scale_fn=scale_fn, scale_mode=scale_mode, cycle_momentum=cycle_momentum,
base_momentum=base_momentum, max_momentum=max_momentum, last_epoch=last_epoch, verbose=verbose
)
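As a rough usage sketch of the new helper (assuming it is importable from tensornet.engine.ops.lr_scheduler, matching the file path above), the wrapper forwards its arguments straight to torch.optim.lr_scheduler.CyclicLR; stepping it once per batch traces the triangular cycle between base_lr and max_lr.

import torch
from tensornet.engine.ops.lr_scheduler import cyclic_lr  # assumed import path

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)

# Triangular policy: LR climbs from base_lr to max_lr over step_size_up batches,
# then falls back to base_lr over step_size_down (defaults to step_size_up) batches.
scheduler = cyclic_lr(optimizer, base_lr=1e-4, max_lr=1e-2, step_size_up=4)

for batch in range(8):        # one full cycle when step_size_up=4
    optimizer.step()          # normally preceded by the forward/backward pass
    scheduler.step()          # Learner.train_batch calls this after every batch
    print(batch, scheduler.get_last_lr())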
