-
Notifications
You must be signed in to change notification settings - Fork 0
/
utils.py
143 lines (109 loc) · 3.92 KB
/
utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
import matplotlib.pyplot as plt
import numpy as np
def graph_error(means, stds, labels, legends, title, show=True):
    """
    Plot one error-bar curve per data series.

    @param means: list of mean-value series, one per curve
    @param stds: list of standard-deviation series, parallel to means
    @param labels: two-element list with the x- and y-axis labels
    @param legends: legend entries, one per curve
    @param title: title of the plot
    @param show: if the plot should be displayed right away. Set to false to display multiple graphs in same plot
    """
    # Iterate the actual series instead of a hard-coded range(2): the original
    # raised IndexError for a single series and silently ignored any beyond two.
    for mean, std in zip(means, stds):
        plt.errorbar(np.arange(1, len(mean) + 1), mean, yerr=std)
    plt.title(title)
    plt.xlabel(labels[0])
    plt.ylabel(labels[1])
    plt.legend(legends)
    if show:
        plt.show()
def graph_plot(data, labels, legends, title, show=True):
    """
    Draw several line graphs into one shared figure.

    @param data: iterable of y-value series, all plotted against x = 1..N
    @param labels: two-element list with the x- and y-axis labels
    @param legends: legend entries, one per series
    @param title: title of the plot
    @param show: if True, display the figure immediately; set to False to
                 keep adding curves to the current figure
    """
    x_axis = np.arange(1, len(data[0]) + 1)
    plt.title(title)
    plt.xlabel(labels[0])
    plt.ylabel(labels[1])
    for series in data:
        plt.plot(x_axis, series)
    plt.legend(legends)
    if show:
        plt.show()
def plot_loss(model, title, show=True):
    """
    Graph the training and validation loss curves of a trained model.

    @param model: trained model exposing training_loss and validation_loss
    @param title: title of the plot
    @param show: if True, display the figure immediately
    """
    curves = [model.training_loss, model.validation_loss]
    legends = ["Training loss", "Validation loss"]
    graph_plot(curves, ["Epoch", "Cross-entropy loss"], legends, title, show)
def plot_acc(model, title, show=True):
    """
    Graph the training and validation accuracy curves of a trained model.

    @param model: trained model exposing training_acc and validation_acc
    @param title: title of the plot
    @param show: if True, display the figure immediately
    """
    curves = [model.training_acc, model.validation_acc]
    legends = ["Training accuracy", "Validation accuracy"]
    graph_plot(curves, ["Epoch", "Accuracy"], legends, title, show)
def plot(model, title=""):
    """Display both the loss and the accuracy figures for a trained model."""
    for plotter in (plot_loss, plot_acc):
        plotter(model, title, show=True)
def multi_plots(models, names, title=""):
    """
    Overlay the loss and accuracy curves of several trained models.

    @param models: trained models exposing training/validation loss and acc
    @param names: display name for each model, parallel to models
    @param title: title for both figures (new, defaults to "" for
                  backward compatibility)
    """
    losses, loss_labels = [], []
    accs, acc_labels = [], []
    for model, name in zip(models, names):
        # append (not +=): += flattened the series into one long list and,
        # for the labels, extended the list with individual characters.
        losses.append(model.training_loss)
        losses.append(model.validation_loss)
        loss_labels.append("Training loss - " + name)
        loss_labels.append("Validation loss - " + name)
        accs.append(model.training_acc)
        accs.append(model.validation_acc)
        acc_labels.append("Training accuracy - " + name)
        acc_labels.append("Validation accuracy - " + name)
    # plot the loss (the original omitted the required title argument,
    # raising TypeError on every call)
    graph_plot(losses, ["Epoch", "Cross-entropy loss"], loss_labels, title)
    # plot the accuracy for all models, not just the last one
    graph_plot(accs, ["Epoch", "Accuracy"], acc_labels, title)
def numerical_approximation(x_data, y_data, model, layer_idx, node_idx, col, bias=False):
    """
    Compare a backprop gradient against a central-difference approximation.

    Perturbs a single weight (or bias) entry by +/- eps, evaluates the loss at
    both points, then runs a clean forward/backward pass and prints the
    numerical gradient, the backprop gradient, and their difference.

    @param x_data: some example data
    @param y_data: labels for the example data
    @param model: model used
    @param layer_idx: index of layer to calculate approximation
    @param node_idx: index of node in layer to calculate approximation
    @param col: column index of the parameter entry to perturb
    @param bias: if True, check a bias entry instead of a weight entry
    """
    eps = 0.01
    layer = model.layers[layer_idx]
    params = layer.b if bias else layer.w

    def loss_at(offset):
        # Temporarily shift the chosen parameter, evaluate the loss,
        # then restore the original value.
        params[node_idx][col] += offset
        loss, _ = model.forward(x_data, y_data)
        params[node_idx][col] -= offset
        return loss

    loss_plus = loss_at(eps)
    loss_minus = loss_at(-eps)
    # forward + backward on the unperturbed parameters to fill the gradients
    model.forward(x_data, y_data)
    model.backward()
    numerical_grad = -((loss_plus - loss_minus) / (2 * eps))
    # Loss is divided by number of images, therefore multiplied by it here
    if not bias:
        numerical_grad *= len(x_data)
    backprop_grad = layer.d_b[node_idx][col] if bias else layer.d_w[node_idx][col]
    print("\nLayer: {}, node: {} pixel: {}".format(layer_idx, node_idx, col))
    print("Gradient difference: {}".format(abs(abs(numerical_grad) - abs(backprop_grad))))
    print("Numerical approximation: {}".format(numerical_grad))
    print("Backprop: {}".format(backprop_grad))