Commit 9c6a07e
- repurpose auto lr decay
nasimrahaman committed Sep 1, 2017
1 parent 9349cb6 commit 9c6a07e
Showing 1 changed file with 19 additions and 7 deletions.
26 changes: 19 additions & 7 deletions inferno/trainers/callbacks/scheduling.py
@@ -1,4 +1,4 @@
-from ...utils.train_utils import Duration, MovingAverage
+from ...utils.train_utils import Frequency, Duration, MovingAverage
 from ...utils import python_utils as pyu
 from ...utils.exceptions import assert_, NotSetError
 from .base import Callback
@@ -90,9 +90,9 @@ def maintain_monitor_moving_average(self):
         return monitor_value


-class AutoLRDecay(_Scheduler):
+class AutoLR(_Scheduler):
     """
-    Callback to decay the learning rate automatically when a specified monitor
+    Callback to decay or hike the learning rate automatically when a specified monitor
     stops improving.
     The monitor should be decreasing, i.e. lower value --> better performance.
@@ -104,7 +104,9 @@ def __init__(self, factor, patience, required_minimum_relative_improvement=0,
         Parameters
         ----------
         factor : float
-            Factor to decay the learning rate by. Should be between 0 and 1.
+            Factor to multiply the learning rate with when out of patience
+            and not in cooldown. Setting `factor < 1` results in an LR decay,
+            whereas setting `factor > 1` results in an LR hike.
         patience : str or tuple or inferno.utils.train_utils.Duration
             Specifies how long to wait for an improvement before an LR decay is triggered.
         required_minimum_relative_improvement : float
@@ -129,8 +131,8 @@ def __init__(self, factor, patience, required_minimum_relative_improvement=0,
         verbose : bool
             Specifies if a message should be printed before decaying.
         """
-        super(AutoLRDecay, self).__init__(monitor=monitor, monitor_momentum=monitor_momentum,
-                                          monitor_while=monitor_while)
+        super(AutoLR, self).__init__(monitor=monitor, monitor_momentum=monitor_momentum,
+                                     monitor_while=monitor_while)
         # Privates
         self._patience = None
         self._cooldown = None
@@ -229,7 +231,7 @@ def decay(self):
                                         'epoch_count': self.trainer.epoch_count})

     def maintain_monitor_moving_average(self):
-        monitor_value = super(AutoLRDecay, self).maintain_monitor_moving_average()
+        monitor_value = super(AutoLR, self).maintain_monitor_moving_average()
         if self._best_monitor_value is None:
             self._best_monitor_value = monitor_value

@@ -288,3 +290,13 @@ def is_significantly_less_than(x, y, min_relative_delta):
         return False
     relative_delta = abs(y - x) / abs(y)
     return relative_delta > min_relative_delta
+
+
+class AutoLRDecay(AutoLR):
+    """
+    Callback to decay the learning rate automatically when a specified monitor
+    stops improving.
+    The monitor should be decreasing, i.e. lower value --> better performance.
+    """
+    pass
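
For reference, here is the helper as it reads after this commit, together with a quick worked check of the relative-improvement criterion. The diff only shows the function's tail, so the `x >= y` guard at the top is an assumption, not part of the hunk above.

def is_significantly_less_than(x, y, min_relative_delta):
    # Assumed guard: x must actually be smaller than y to count as an improvement.
    if x >= y:
        return False
    # Relative improvement of x over y, e.g. 1.0 --> 0.9 is a 10% improvement.
    relative_delta = abs(y - x) / abs(y)
    return relative_delta > min_relative_delta

# 1.0 --> 0.9 is a 10% relative improvement, above a 5% threshold:
assert is_significantly_less_than(0.9, 1.0, min_relative_delta=0.05)
# 1.0 --> 0.99 is only 1%, below the threshold:
assert not is_significantly_less_than(0.99, 1.0, min_relative_delta=0.05)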

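Below is a minimal usage sketch of the repurposed callback. The `Trainer` wiring, the monitor name 'validation_error', and the '10 epochs' patience string are illustrative assumptions; only the `AutoLR` constructor arguments themselves come from this commit.

import torch.nn as nn
from inferno.trainers.basic import Trainer
from inferno.trainers.callbacks.scheduling import AutoLR

model = nn.Linear(10, 2)  # stand-in model for illustration
trainer = Trainer(model)
# factor < 1: decay the LR by 5% whenever 'validation_error' fails to improve
# by at least 1% (relative) within 10 epochs; factor > 1 would hike it instead.
trainer.register_callback(AutoLR(factor=0.95,
                                 patience='10 epochs',
                                 required_minimum_relative_improvement=0.01,
                                 monitor='validation_error'))

The old name is kept as an empty subclass, so existing code that registers AutoLRDecay continues to work unchanged.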