forked from rahulkidambi/AccSGD
AccSGD.py
from torch.optim.optimizer import Optimizer, required
import copy

class AccSGD(Optimizer):
    r"""Implements the provably accelerated stochastic optimization method proposed in
    https://arxiv.org/pdf/1704.08227.pdf. The method has been employed in
    https://openreview.net/forum?id=rJTutzbA- to train several deep learning models of
    practical interest. This implementation builds on the SGD optimizer found in the
    PyTorch codebase.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): learning rate (required)
        kappa (float, optional): ratio of long to short step (default: 1000)
        xi (float, optional): statistical advantage parameter (default: 10)
        smallConst (float, optional): any value <= 1 (default: 0.7)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)

    Example:
        >>> from AccSGD import *
        >>> optimizer = AccSGD(model.parameters(), lr=0.1, kappa=1000.0, xi=10.0)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()
    """
    def __init__(self, params, lr=required, kappa=1000.0, xi=10.0,
                 smallConst=0.7, weight_decay=0):
        defaults = dict(lr=lr, kappa=kappa, xi=xi, smallConst=smallConst,
                        weight_decay=weight_decay)
        super(AccSGD, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(AccSGD, self).__setstate__(state)
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            # Long step size and averaging constants derived from kappa
            # (the long-to-short step ratio), xi, and smallConst.
            large_lr = (group['lr'] * group['kappa']) / group['smallConst']
            Alpha = 1.0 - ((group['smallConst'] * group['smallConst'] * group['xi']) / group['kappa'])
            Beta = 1.0 - Alpha
            zeta = group['smallConst'] / (group['smallConst'] + Beta)
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if weight_decay != 0:
                    # L2 regularization: add weight_decay * p to the gradient.
                    d_p.add_(p.data, alpha=weight_decay)
                param_state = self.state[p]
                if 'momentum_buffer' not in param_state:
                    # The running-average buffer starts as a copy of the parameter.
                    param_state['momentum_buffer'] = copy.deepcopy(p.data)
                buf = param_state['momentum_buffer']
                # Buffer update: buf <- Beta * ((1/Beta - 1) * buf - large_lr * d_p + p)
                buf.mul_((1.0 / Beta) - 1.0)
                buf.add_(d_p, alpha=-large_lr)
                buf.add_(p.data)
                buf.mul_(Beta)
                # Parameter update: p <- zeta * (p - lr * d_p) + (1 - zeta) * buf
                p.data.add_(d_p, alpha=-group['lr'])
                p.data.mul_(zeta)
                p.data.add_(buf, alpha=1.0 - zeta)
        return loss
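

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the upstream module): fits a
# toy linear-regression model with AccSGD so the optimizer can be smoke-tested
# end to end. The model, synthetic data, and hyperparameter values below are
# assumptions chosen for this demo, not settings recommended by the paper.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch
    import torch.nn as nn

    torch.manual_seed(0)
    model = nn.Linear(10, 1)
    loss_fn = nn.MSELoss()
    optimizer = AccSGD(model.parameters(), lr=0.05, kappa=1000.0, xi=10.0)

    # Synthetic regression data: targets are a noisy linear function of inputs.
    inputs = torch.randn(256, 10)
    true_w = torch.randn(10, 1)
    targets = inputs @ true_w + 0.01 * torch.randn(256, 1)

    for it in range(200):
        optimizer.zero_grad()
        loss = loss_fn(model(inputs), targets)
        loss.backward()
        optimizer.step()
        if it % 50 == 0:
            print(f"iteration {it}: loss = {loss.item():.6f}")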