ada_grad.py
import numpy

from chainer import backend
from chainer.backends import cuda
from chainer import optimizer


# Module-level defaults shared by AdaGradRule and the AdaGrad optimizer.
_default_hyperparam = optimizer.Hyperparameter()
_default_hyperparam.lr = 0.001
_default_hyperparam.eps = 1e-8


class AdaGradRule(optimizer.UpdateRule):

    """Update rule of AdaGrad.

    See :class:`~chainer.optimizers.AdaGrad` for the default values of the
    hyperparameters.

    Args:
        parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
            that provides the default values.
        lr (float): Learning rate.
        eps (float): Small value for numerical stability.

    """

    _kernel = None  # lazily compiled CUDA kernel, shared by all instances

    def __init__(self, parent_hyperparam=None, lr=None, eps=None):
        super(AdaGradRule, self).__init__(
            parent_hyperparam or _default_hyperparam)
        if lr is not None:
            self.hyperparam.lr = lr
        if eps is not None:
            self.hyperparam.eps = eps

    def init_state(self, param):
        # Allocate the squared-gradient accumulator ``h`` on the same
        # device (CPU or GPU) that holds the parameter itself.
        xp = backend.get_array_module(param.data)
        with cuda.get_device_from_array(param.data):
            self.state['h'] = xp.zeros_like(param.data)

    def update_core_cpu(self, param):
        grad = param.grad
        if grad is None:
            return
        lr = self.hyperparam.lr
        eps = self.hyperparam.eps
        h = self.state['h']

        h += grad * grad
        param.data -= lr * grad / (numpy.sqrt(h) + eps)
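
    # Note: because ``h`` starts at zero, the very first AdaGrad step
    # reduces to lr * grad / (|grad| + eps), i.e. roughly lr * sign(grad).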

    def update_core_gpu(self, param):
        grad = param.grad
        if grad is None:
            return
        if AdaGradRule._kernel is None:
            # Compile the fused update kernel once per process and cache it
            # on the class; later calls reuse the compiled kernel.
            AdaGradRule._kernel = cuda.elementwise(
                'T grad, T lr, T eps',
                'T param, T h',
                '''h += grad * grad;
                   param -= lr * grad / (sqrt(h) + eps);''',
                'adagrad')
        AdaGradRule._kernel(grad, self.hyperparam.lr, self.hyperparam.eps,
                            param.data, self.state['h'])


class AdaGrad(optimizer.GradientMethod):

    """AdaGrad optimizer.

    See: http://jmlr.org/papers/v12/duchi11a.html

    Args:
        lr (float): Learning rate.
        eps (float): Small value for numerical stability.

    """

    def __init__(self, lr=_default_hyperparam.lr,
                 eps=_default_hyperparam.eps):
        super(AdaGrad, self).__init__()
        self.hyperparam.lr = lr
        self.hyperparam.eps = eps

    # Expose the hyperparameters as plain attributes of the optimizer.
    lr = optimizer.HyperparameterProxy('lr')
    eps = optimizer.HyperparameterProxy('eps')

    def create_update_rule(self):
        return AdaGradRule(self.hyperparam)
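

if __name__ == '__main__':
    # Minimal usage sketch: fit a toy linear regression with the optimizer
    # defined above. The model, data, and hyperparameter values here are
    # illustrative only, not part of the module's API.
    import chainer.functions as F
    import chainer.links as L

    model = L.Linear(3, 1)
    opt = AdaGrad(lr=0.1)
    opt.setup(model)

    x = numpy.random.rand(16, 3).astype(numpy.float32)
    t = x.sum(axis=1, keepdims=True)

    for _ in range(100):
        model.cleargrads()
        loss = F.mean_squared_error(model(x), t)
        loss.backward()
        opt.update()
    print('final loss:', float(loss.array))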