from chainer import cuda
from chainer import optimizer
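
# Shared default hyperparameters: SGDRule falls back to this object through
# the parent_hyperparam chain whenever no explicit ``lr`` is given, so the
# learning rate defaults to 0.01.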
_default_hyperparam = optimizer.Hyperparameter()
_default_hyperparam.lr = 0.01


class SGDRule(optimizer.UpdateRule):

    """Update rule of vanilla stochastic gradient descent.

    See :class:`~chainer.optimizers.SGD` for the default values of the
    hyperparameters.

    Args:
        parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
            that provides the default values.
        lr (float): Learning rate.

    """

    def __init__(self, parent_hyperparam=None, lr=None):
        super(SGDRule, self).__init__(
            parent_hyperparam or _default_hyperparam)
        if lr is not None:
            self.hyperparam.lr = lr

    def update_core_cpu(self, param):
        grad = param.grad
        if grad is None:
            return
        param.data -= self.hyperparam.lr * grad

    def update_core_gpu(self, param):
        grad = param.grad
        if grad is None:
            return
        cuda.elementwise('T grad, T lr', 'T param',
                         'param -= lr * grad',
                         'sgd')(grad, self.hyperparam.lr, param.data)
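

# Note: both update paths implement the same rule, ``param -= lr * grad``.
# The GPU path goes through ``cuda.elementwise``, which compiles the kernel
# on first call and caches it, so later updates reuse the compiled kernel.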


class SGD(optimizer.GradientMethod):

    """Vanilla Stochastic Gradient Descent.

    Args:
        lr (float): Learning rate.

    """

    def __init__(self, lr=_default_hyperparam.lr):
        super(SGD, self).__init__()
        self.hyperparam.lr = lr

    lr = optimizer.HyperparameterProxy('lr')

    def create_update_rule(self):
        return SGDRule(self.hyperparam)
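

# Usage sketch (illustration only; not part of the original module): a
# minimal CPU training loop assuming Chainer and NumPy are installed. The
# toy linear model and the synthetic regression data are hypothetical
# stand-ins for a real setup.
if __name__ == '__main__':
    import numpy as np

    import chainer.functions as F
    import chainer.links as L

    model = L.Linear(3, 1)       # toy model: y = Wx + b
    opt = SGD(lr=0.1)            # the optimizer defined above
    opt.setup(model)             # attaches an SGDRule to every parameter

    x = np.random.randn(16, 3).astype(np.float32)
    t = np.random.randn(16, 1).astype(np.float32)

    for _ in range(100):
        y = model(x)
        loss = F.mean_squared_error(y, t)
        model.cleargrads()       # reset accumulated gradients
        loss.backward()          # fill param.grad for each parameter
        opt.update()             # runs update_core_cpu: param -= lr * grad
    print('final loss:', float(loss.data))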