test_completed.py
import numpy as np


# TODO 1: implement the backward pass of Relu
class Relu():
    """
    ReLU activation.

    The forward pass receives the input data (an array) and replaces every
    negative entry with zero. The backward pass returns the gradient of that
    elementwise maximum: 1 where the input is positive, 0 elsewhere.
    """

    def forward(self, x):
        self._hidden = np.maximum(0, x)
        return self._hidden

    def backward(self, x):
        # Gradient of max(0, x): 1 for positive entries, 0 otherwise
        # (the kink at x == 0 is assigned gradient 0 here).
        grad = (x > 0) * 1.0
        return grad
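
# Mask semantics above, hand-checkable (illustrative values, not from the
# original file): for x = np.array([-2.0, 0.0, 3.0]), forward(x) returns
# [0., 0., 3.] and backward(x) returns [0., 0., 1.], since the gradient of
# max(0, x) is 1 for positive entries and 0 elsewhere.
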
# TODO 2: implement the update equation of a gradient descent optimizer
class GradientDescent():
    """
    Simple gradient-descent optimizer.
    """

    def step(self, weights, grad, step_size):
        weights = weights - (step_size * grad)
        return weights
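
# The step() update above is plain gradient descent:
#     w_new = w - step_size * grad
# A hand-checkable example (illustrative values, not from the original file):
# with weights = 1.0, grad = 0.5 and step_size = 0.01, step() returns
# 1.0 - 0.01 * 0.5 = 0.995.
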
if __name__ == "__main__":
    # Let's run some tests with the new functions.
    iterations = 1
    step_size = 0.01

    # Initialize weights
    weights = 0.01 * np.ones((20, 100))

    # Initialize artificial gradients
    grads = [0.5 / (i + 1) * np.ones((20, 100)) for i in range(iterations)]
    grads = np.array(grads)

    # Instantiate optimizer and activation
    optimizer = GradientDescent()
    relu = Relu()

    for i in range(iterations):
        print('weights = ' + str(weights))
        relu.forward(weights)
        print('weights = ' + str(weights))
        grad = relu.backward(weights)
        weights = optimizer.step(weights, grad, step_size)
        print('weights = ' + str(weights))
        print('grad = ' + str(grad))
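
    # Extra sanity check (a sketch, not part of the original tests): compare
    # relu.backward against a central finite-difference estimate of the
    # derivative of sum(relu(x)), evaluated away from the kink at zero.
    x = np.array([-1.0, 0.5, 2.0])
    eps = 1e-6
    numeric = (np.maximum(0, x + eps).sum()
               - np.maximum(0, x - eps).sum()) / (2 * eps)
    analytic = relu.backward(x).sum()
    print('finite-difference check: |analytic - numeric| = '
          + str(abs(analytic - numeric)))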