# -*- coding: utf-8 -*-
import os
from typing import Iterable, Union

from ..functional.inplace import _inplace_add_
from ..tensor import Parameter, tensor
from .optimizer import Optimizer


class SGD(Optimizer):
    r"""Implements stochastic gradient descent.

    Nesterov momentum is based on the formula from
    `"On the importance of initialization and momentum in deep learning" <http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf>`_ .

    Args:
        params: iterable of parameters to optimize or dicts defining
            parameter groups.
        lr: learning rate.
        momentum: momentum factor. Default: 0.0
        nesterov: enables Nesterov momentum. Default: False
        weight_decay: weight decay (L2 penalty). Default: 0.0
    """

    def __init__(
        self,
        params: Union[Iterable[Parameter], dict],
        lr: float,
        momentum: float = 0.0,
        nesterov: bool = False,
        weight_decay: float = 0.0,
    ):
        assert lr >= 0.0, "Invalid learning rate: {}".format(lr)
        assert momentum >= 0.0, "Invalid momentum value: {}".format(momentum)
        assert weight_decay >= 0.0, "Invalid weight_decay value: {}".format(
            weight_decay
        )
        assert not nesterov or momentum > 0.0, "Nesterov momentum requires a momentum"

        defaults = dict(lr=lr, momentum=momentum, weight_decay=weight_decay)
        super().__init__(params, defaults)
        self.nesterov = nesterov
        self._disable_type_convert = True
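        # ``_disable_type_convert`` opts parameter updates out of automatic
        # input dtype conversion, which is why ``_updates`` below casts its
        # scalar hyperparameters to 0-d tensors explicitly.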

    def _create_state(self, param_group):
        if param_group["momentum"] != 0.0:
            for param in param_group["params"]:
                self._add_state(param, "momentum_buffer")

    def _updates(self, param_group):
        lr = param_group["lr"]
        weight_decay = param_group["weight_decay"]
        momentum = param_group["momentum"]

        # since `convert_inputs` is disabled for param updates,
        # scalars should be explicitly converted to tensors
        _lr = tensor(lr, dtype="float32")
        _weight_decay = tensor(weight_decay, dtype="float32")
        _momentum = tensor(momentum, dtype="float32")

        inplace_mode = int(os.getenv("MEGENGINE_INPLACE_UPDATE", "0"))
        if inplace_mode:
            _neg_lr = tensor(-lr, dtype="float32")
            c1 = tensor(1.0)
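
        # ``_inplace_add_(dest, src, alpha=a, beta=b)`` performs
        # ``dest = a * dest + b * src`` without allocating a new tensor
        # (semantics inferred from its two uses below).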
        for param in param_group["params"]:
            if param.grad is None:
                continue

            grad = param.grad
            if weight_decay != 0.0:
                # fold the L2 penalty into the gradient
                grad = grad + param * _weight_decay

            if inplace_mode:
                if momentum != 0.0:
                    v = self._state[param]["momentum_buffer"]
                    # v = momentum * v + grad
                    _inplace_add_(v, grad, alpha=_momentum, beta=c1)
                    if self.nesterov:
                        grad = grad + v * _momentum
                    else:
                        grad = v
                # param = param - lr * grad
                _inplace_add_(param, grad, alpha=c1, beta=_neg_lr)
                continue

            if momentum != 0.0:
                v = self._state[param]["momentum_buffer"]
                v *= _momentum
                v += grad
                if self.nesterov:
                    grad = grad + v * _momentum
                else:
                    grad = v
            param -= _lr * grad
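
# A minimal usage sketch (illustrative, not part of this module; assumes a
# MegEngine module ``net`` and a ``GradManager`` ``gm`` attached to its
# parameters, as in a typical MegEngine training loop):
#
#   import megengine.optimizer as optim
#
#   opt = optim.SGD(net.parameters(), lr=0.01, momentum=0.9, nesterov=True)
#   with gm:
#       loss = loss_fn(net(inputs), targets)
#       gm.backward(loss)
#   opt.step()
#   opt.clear_grad()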