@@ -38,7 +38,7 @@ function FFNNet(sizes::Int...)
     # Create an Array of Neural Network Layers of the right sizes
     # The first size corresponds to the input size of the network
     layers = Array(FFNNLayer, length(sizes) - 1)
-    for i in 2:length(sizes) - 1
+    @inbounds for i in 2:length(sizes) - 1
         layers[i-1] = FFNNLayer(sizes[i])
     end
     layers[end] = FFNNLayer(sizes[end], bias=false) # Last layer without bias
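This is the pattern the whole commit applies: `@inbounds` asks Julia to skip array bounds checking inside the annotated loop, which can speed up tight loops whose indices are known to be valid. A minimal illustration of the trade-off (hypothetical functions, not from this repo):

```julia
# Sketch only: the same reduction with and without bounds checking.
function sum_checked(v::Vector{Float64})
    s = 0.0
    for i in 1:length(v)
        s += v[i]              # every access is bounds-checked
    end
    return s
end

function sum_unchecked(v::Vector{Float64})
    s = 0.0
    @inbounds for i in 1:length(v)
        s += v[i]              # check elided; caller guarantees 1:length(v) is valid
    end
    return s
end
```

The annotation is safe in these loops because each range is derived from the length of the array being indexed; with an out-of-range index, `@inbounds` would be undefined behavior rather than a `BoundsError`.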
@@ -70,7 +70,7 @@ function FFNNet(layers::Vector{FFNNLayer}, inputsize::Int)
     weights[1] = rand(size(layers[1]), inputsize + 1)*2*eps - eps

     # Matrices from layer i-1 (including bias) to layer i
-    for i in 2:length(layers)
+    @inbounds for i in 2:length(layers)
         eps = ɛ(size(layers[i]), size(layers[i-1]))
         weights[i] = rand(size(layers[i]), size(layers[i-1]) + 1)*2*eps - eps
     end
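For context, `rand(m, n)*2*eps - eps` rescales uniform samples from [0, 1) into [-eps, eps), giving small symmetric initial weights. The `ɛ` helper is defined elsewhere in the repo and is not shown in this diff; the sketch below assumes the common fan-in/fan-out heuristic, which may differ from the actual definition:

```julia
# Assumed ɛ heuristic (sqrt(6/(fan_in + fan_out)) is a common choice);
# the repo's actual ɛ function is not shown in this hunk.
init_eps(fan_out::Int, fan_in::Int) = sqrt(6.0 / (fan_in + fan_out))

function init_weights(fan_out::Int, fan_in::Int)
    eps = init_eps(fan_out, fan_in)
    # rand is uniform on [0, 1); rescale to [-eps, eps)
    return rand(fan_out, fan_in + 1) .* (2eps) .- eps   # +1 column for the bias
end
```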
@@ -120,7 +120,7 @@ function propagate(net::FFNNet, x::Vector{Float64})
     update!(net.layers[1], net.weights[1] * vcat([1.0], x))

     # Update all remaining layers
-    for i in 2:length(net)
+    @inbounds for i in 2:length(net)
         update!(net.layers[i], net.weights[i] * activate(net.layers[i-1]))
     end
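The forward pass prepends a constant 1.0 to the input so that the first column of `net.weights[1]` acts as the bias; later layers consume the previous layer's activations the same way (presumably `activate` includes the bias unit in its output, given the weight shapes above). A self-contained sketch of one such step, with hypothetical names:

```julia
# One forward step: W is (n_out × (n_in + 1)), and its first column is the bias.
sigmoid(z) = 1.0 ./ (1.0 .+ exp.(-z))

function forward_step(W::Matrix{Float64}, x::Vector{Float64})
    a = vcat([1.0], x)     # prepend the bias unit
    return sigmoid(W * a)  # affine map, then elementwise activation
end
```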
@@ -153,7 +153,7 @@ function backpropagate(net::FFNNet,
     δ[L] = (der(last.activation)(last))' * der(cost)(output, target)

     # Find δ of previous layers, backwards
-    for l in (L-1):-1:1
+    @inbounds for l in (L-1):-1:1
         layer = net.layers[l]  # Current layer
         W = net.weights[l+1]   # W^(l+1)
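This loop walks the layers backwards computing the error terms: δ at the output layer comes from the cost derivative, and each earlier δ is obtained from the next layer's δ through the transposed weight matrix, gated by the activation derivative. A generic sketch of one backward step under those assumptions (the rest of the loop body is not shown in this hunk):

```julia
# One backward step of the delta recursion:
#   δ(l) = ((W(l+1))' * δ(l+1))[2:end] .* σ'(z(l))
# The leading entry is dropped because the first column of W(l+1)
# multiplies the bias unit, which has no error term of its own.
function backward_step(Wnext::Matrix{Float64}, δnext::Vector{Float64},
                       dσ::Vector{Float64})
    full = Wnext' * δnext     # length n_l + 1, bias component first
    return full[2:end] .* dσ  # drop bias entry, gate by activation derivative
end
```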
@@ -230,7 +230,7 @@ function train!(net::FFNNet,
         end
     end

-    for l in 1:L
+    @inbounds for l in 1:L
         # Update Weights using Momentum Gradient Descent
         # W^(l) = W^(l) - α∇E - η∇E_old
         net.weights[l] -= α*grad[l] + η*last_grad[l]
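The comment spells the update out: each weight matrix takes a step along the current gradient scaled by the learning rate α, plus the previous step's gradient scaled by the momentum coefficient η. A standalone sketch of that rule (hypothetical helper mirroring the line above):

```julia
# Momentum gradient descent, as in the comment: W = W - α*∇E - η*∇E_old.
# Mutates `weights`; the caller keeps `grad` around as the next step's `last_grad`.
function momentum_step!(weights::Vector{Matrix{Float64}},
                        grad::Vector{Matrix{Float64}},
                        last_grad::Vector{Matrix{Float64}},
                        α::Float64, η::Float64)
    @inbounds for l in 1:length(weights)
        weights[l] -= α*grad[l] + η*last_grad[l]
    end
    return weights
end
```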