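# runtests.jl
# Test suite for Nervoso, a feed-forward neural network package, using FactCheck.
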
using Nervoso, FactCheck
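
# Layer tests: construction (with and without a bias unit), basic queries,
# indexing, iteration, activation, and in-place updates.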
facts("Layer") do
    context("Build a layer") do
        @fact FFNNLayer(10) --> not(nothing)
        @fact FFNNLayer(10, softmax, bias = false) --> not(nothing)
        layer1 = FFNNLayer(10)
        layer2 = FFNNLayer(10, softmax, bias = false)
        context("With bias") do
            @fact layer1.bias --> true
            @fact length(layer1.neurons) --> 11  # 10 neurons plus the bias unit
            @fact layer1.activation == tanh --> true
        end
        context("Without bias and a different activation function") do
            @fact layer2.bias --> false
            @fact length(layer2.neurons) --> 10
            @fact layer2.activation == softmax --> true
        end
    end
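
    # Fresh layers, shared by the remaining layer contexts.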
    layer1 = FFNNLayer(10)
    layer2 = FFNNLayer(10, softmax, bias = false)
    context("Basic information about a layer") do
        context("Default activation") do
            @fact layer1 == FFNNLayer(10, tanh) --> true
        end
        context("Size and length") do
            @fact size(layer1) --> 10
            @fact length(layer1) --> 10
            @fact size(layer2) --> 10
            @fact length(layer2) --> 10
        end
        context("Access neurons") do
            @fact layer1[1] --> 0
            @fact layer2[1] --> 0
            @fact layer1[end] --> 0
            @fact layer2[end] --> 0
        end
        context("Access bias") do
            @fact layer1[0] --> 1.0
            @fact_throws layer2[0]  # layer2 has no bias unit
        end
        context("Iterating on a layer") do
            # Iterate on layer1
            c = 0
            for i in layer1
                @fact i --> 0.0
                c += 1
            end
            @fact c --> 10
            # Iterate on layer2
            c = 0
            for i in layer2
                @fact i --> 0.0
                c += 1
            end
            @fact c --> 10
        end
    end
    context("Update and activate a layer") do
        context("Activate") do
            @fact activate(layer1) --> vcat([1.0], zeros(10))
            @fact activate(layer2) --> ones(10)/10
        end
        context("Update") do
            @fact_throws update!(layer1, [1])  # wrong input length must throw
            update!(layer1, ones(10))
            @fact layer1.neurons --> ones(11)
            @fact_throws update!(layer2, [1])
            update!(layer2, ones(10))
            @fact layer2.neurons --> ones(10)
        end
    end
end
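
# Network tests: construction, forward propagation, training, and error measures.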
facts("Network") do
    context("Build a network") do
        @fact FFNNet(10, 10, 10) --> not(nothing)
        @fact FFNNet(10, 10, 10, 10, 10) --> not(nothing)
        layer1 = FFNNLayer(10)
        layer2 = FFNNLayer(10, softmax, bias = false)
        @fact FFNNet([layer1, layer2], 10) --> not(nothing)
        @fact_throws FFNNet(10)      # a network needs an input size and at least two layers
        @fact_throws FFNNet(10, 10)
    end
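
    # Networks reused below: both take 2 inputs; net1 has two layers, net2 has four.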
    net1 = FFNNet(2, 10, 10)
    net2 = FFNNet(2, 10, 10, 10, 10)
    context("Basic information about a network") do
        @fact length(net1) --> 2
        @fact length(net2) --> 4
        @fact size(net1) --> (2, 2)
        @fact size(net2) --> (2, 4)
    end
    context("Propagating examples") do
        @fact (typeof(propagate(net1, ones(2))) == Vector{Float64}) --> true
        @fact (typeof(propagate(net2, ones(2))) == Vector{Float64}) --> true
        # Inputs with the wrong dimension must be rejected
        @fact_throws propagate(net1, ones(1))
        @fact_throws propagate(net1, ones(3))
        @fact_throws propagate(net2, ones(1))
        @fact_throws propagate(net2, ones(3))
    end
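
    # Random training data: three 2-dimensional inputs with 10-dimensional targets.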
    inputs = Vector{Float64}[
        rand(2),
        rand(2),
        rand(2),
    ]
    outputs = Vector{Float64}[
        rand(10),
        rand(10),
        rand(10),
    ]
    context("Training") do
        @fact train!(net1, inputs, outputs) --> nothing
        @fact train!(net2, inputs, outputs) --> nothing
    end
    context("Assessing performance") do
        @fact (typeof(meanerror(net1, inputs, outputs)) == Float64) --> true
        @fact (typeof(meanerror(net2, inputs, outputs)) == Float64) --> true
        @fact (typeof(classerror(net1, inputs, outputs)) == Int) --> true
        @fact (typeof(classerror(net2, inputs, outputs)) == Int) --> true
    end
end
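
# Activation function tests: each activation must expose its derivative and
# produce vectors and Jacobian matrices of the right size, with or without bias.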
facts("Activation functions") do
    layer1 = FFNNLayer(10)
    layer2 = FFNNLayer(10, bias = false)
    context("tanh") do
        @fact derivatives[tanh] == tanhprime --> true
        @fact der(tanh) == tanhprime --> true
        @fact typeof(tanh(layer1)) == Vector{Float64} --> true
        @fact length(tanh(layer1)) --> length(layer1) + 1
        @fact tanh(layer1) --> vcat([1.0], zeros(length(layer1)))
        @fact typeof(der(tanh)(layer1)) == Matrix{Float64} --> true
        @fact size(der(tanh)(layer1)) --> (length(layer1) + 1, length(layer1) + 1)
        @fact typeof(tanh(layer2)) == Vector{Float64} --> true
        @fact length(tanh(layer2)) --> length(layer2)
        @fact tanh(layer2) --> zeros(length(layer2))
        @fact typeof(der(tanh)(layer2)) == Matrix{Float64} --> true
        @fact size(der(tanh)(layer2)) --> (length(layer2), length(layer2))
    end
    context("softmax") do
        @fact derivatives[softmax] == softmaxprime --> true
        @fact der(softmax) == softmaxprime --> true
        @fact typeof(softmax(layer1)) == Vector{Float64} --> true
        @fact length(softmax(layer1)) --> length(layer1) + 1
        @fact typeof(der(softmax)(layer1)) == Matrix{Float64} --> true
        @fact size(der(softmax)(layer1)) --> (length(layer1) + 1, length(layer1) + 1)
        @fact typeof(softmax(layer2)) == Vector{Float64} --> true
        @fact length(softmax(layer2)) --> length(layer2)
        @fact typeof(der(softmax)(layer2)) == Matrix{Float64} --> true
        @fact size(der(softmax)(layer2)) --> (length(layer2), length(layer2))
    end
    context("logistic") do
        @pending derivatives[σ] == σprime --> true
        @pending der(σ) == σprime --> true
        @pending derivatives[logistic] == logisticprime --> true
        @pending der(logistic) == logisticprime --> true
    end
end
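
# Cost function tests: each cost exposes its derivative and evaluates correctly
# on simple examples.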
facts("Cost functions") do
    context("Quadratic Error") do
        @fact der(quaderror) == Nervoso.derivatives[quaderror] --> true
        @fact der(quaderror) == Nervoso.quaderrorprime --> true
        @fact quaderror([1.0, 0.0], [0.0, 1.0]) --> roughly(1.0)
        @fact Nervoso.quaderrorprime([1.0, 0.0], [0.0, 1.0]) --> [1.0, -1.0]
    end
    context("Cross-entropy Error") do
        @fact der(ceerror) == Nervoso.derivatives[ceerror] --> true
        @fact der(ceerror) == Nervoso.ceerrorprime --> true
        @fact ceerror([0.5, 0.5], [0.5, 0.5]) < Inf --> true  # finite on matching distributions
        @fact Nervoso.ceerrorprime([0.5, 0.5], [0.5, 0.5]) --> [-1.0, -1.0]
    end
end