forked from CorticalComputer/DXNN2
-
Notifications
You must be signed in to change notification settings - Fork 0
/
neuron.erl
238 lines (230 loc) · 10.3 KB
/
neuron.erl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This source code and work is provided and developed by Gene I. Sher & DXNN Research Group WWW.DXNNResearch.COM
%
%The original release of this source code and the DXNN MK2 system was introduced and explained in my book: Handbook of Neuroevolution Through Erlang. Springer 2012, print ISBN: 978-1-4614-4462-6 ebook ISBN: 978-1-4614-4463-6.
%
%Copyright (C) 2009 by Gene Sher, DXNN Research Group CorticalComputer@gmail.com
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%%%%%%%%%%%%%%%%%%%% Deus Ex Neural Network :: DXNN %%%%%%%%%%%%%%%%%%%%
-module(neuron).
-compile(export_all).
-include("records.hrl").
-define(SAT_LIMIT,math:pi()*10).
-define(OUTPUT_SAT_LIMIT,1).
-define(RO_SIGNAL,0).
%% Per-neuron process state. "SI" = synaptic input (signal-carrying
%% connections), "MI" = modulatory input (connections driving plasticity).
%% "PIdPs" lists pair an input pid with its weight block(s).
-record(state,{
id, % this neuron's id (as assigned by the genotype)
cx_pid, % pid of the cortex process
af, % activation function name, applied via functions:AF/1
%pf,
aggrf, % aggregation function name, applied via signal_aggregator:AggrF/2
heredity_type, % darwinian | lamarckian — controls what weight_backup saves
si_pids=[], % expected synaptic input pids, terminated by the 'ok' sentinel
si_pidps_bl = [], % baseline (pre-tuning) synaptic {InputPId,Weights} pairs
si_pidps_current=[], % active synaptic weights (updated by plasticity)
si_pidps_backup=[], % synaptic weights saved on weight_backup
mi_pids=[], % expected modulatory input pids, terminated by 'ok'
mi_pidps_current=[], % active modulatory {InputPId,Weights} pairs
mi_pidps_backup=[], % modulatory weights saved on weight_backup
%pf,
pf_current, % active plasticity function: {PFName,PFParameters}
pf_backup, % plasticity function saved on weight_backup
output_pids=[], % pids to fan the output signal out to
ro_pids=[] % recursive-output pids, primed with ?RO_SIGNAL on (re)start
}).
%% gen/2: spawn a neuron process on Node, running prep/1 with the exoself's
%% pid. Returns the pid of the new process.
%% NOTE(review): plain spawn/4 (not spawn_link) — presumably the exoself
%% manages neuron lifetimes via explicit terminate messages; confirm
%% against the exoself module.
gen(ExoSelf_PId,Node)->
spawn(Node,?MODULE,prep,[ExoSelf_PId]).
%% prep/1: initialization phase of a freshly spawned neuron. Seeds the
%% per-process random state, then blocks until the exoself delivers the
%% init tuple, primes any recursive outputs with the default signal, builds
%% the #state{} record, and drops into the main loop.
prep(ExoSelf_PId) ->
	%% FIX: erlang:now/0 is deprecated; erlang:timestamp/0 returns the same
	%% {MegaSecs,Secs,MicroSecs} triple that random:seed/1 accepts.
	%% NOTE(review): the 'random' module is itself deprecated (removed in
	%% OTP 24); kept here for consistency with the rest of this file, which
	%% uses random:uniform/0 throughout.
	random:seed(erlang:timestamp()),
	receive
		{ExoSelf_PId,{Id,Cx_PId,AF,PF,AggrF,HeredityType,SI_PIdPs,MI_PIdPs,Output_PIds,RO_PIds}} ->
			%% Prime recursive connections with the default signal so a
			%% cyclic topology does not deadlock waiting for input.
			fanout(RO_PIds,{self(),forward,[?RO_SIGNAL]}),
			%% Expected-input lists carry an 'ok' sentinel at the end; the
			%% main loop matches on [ok] to know all inputs have arrived.
			%% Bias entries have no sending process, so they are excluded.
			SI_PIds = lists:append([IPId || {IPId,_W} <- SI_PIdPs, IPId =/= bias],[ok]),
			MI_PIds = lists:append([IPId || {IPId,_W} <- MI_PIdPs, IPId =/= bias],[ok]),
			%io:format("SI_PIdPs:~p ~nMI_PIdPs:~p~n",[SI_PIdPs,MI_PIdPs]),
			S=#state{
				id=Id,
				cx_pid=Cx_PId,
				af=AF,
				pf_current=PF,
				pf_backup=PF,
				aggrf=AggrF,
				heredity_type = HeredityType,
				si_pids=SI_PIds,
				si_pidps_bl = SI_PIdPs,
				si_pidps_current=SI_PIdPs,
				si_pidps_backup=SI_PIdPs,
				mi_pids=MI_PIds,
				mi_pidps_current=MI_PIdPs,
				mi_pidps_backup=MI_PIdPs,
				output_pids=Output_PIds,
				ro_pids=RO_PIds
			},
			loop(S,ExoSelf_PId,SI_PIds,MI_PIds,[],[])
	end.
%When gen/2 is executed, it spawns the neuron element and immediately begins to wait for its initial state message from the exoself. Once the state message arrives, the neuron sends out the default forward signals to any elements in its ro_pids list, if any. Afterwards, prep drops into the neuron's main loop.
%% loop/6 — main neuron loop.
%% First clause: fires when both expected-input lists have been consumed down
%% to the [ok] sentinel, i.e. every synaptic and modulatory signal for this
%% cycle has arrived. It aggregates the synaptic signals, applies the
%% activation function, fans the (saturated) output to output_pids, then —
%% if a plasticity function is configured — aggregates the modulatory
%% signals and lets the plasticity function update the synaptic weights.
loop(S,ExoSelf_PId,[ok],[ok],SIAcc,MIAcc)->
PF = S#state.pf_current,
%PreProcessors=S#state.pre_processors,
%SignalIntegrator=S#state.signal_integrator,
AF = S#state.af,
%PostProcessors=S#state.post_processors,
AggrF = S#state.aggrf,
{PFName,PFParameters} = PF,
%io:format("self:~p~n SIAcc:~p~n MIAcc:~p~n",[self(), SIAcc,MIAcc]),
% Accumulators are built head-first; reverse restores arrival order so the
% signals line up with the weight list.
Ordered_SIAcc = lists:reverse(SIAcc),
SI_PIdPs = S#state.si_pidps_current,
%SAggregation_Product = signal_aggregator:AggrF(Ordered_SIAcc,SI_PIdPs),
%SOutput = sat(functions:AF(SAggregation_Product),?OUTPUT_SAT_LIMIT),%Saturation is done at -1 and 1
% Dynamic dispatch: AggrF/AF are atoms naming functions in the
% signal_aggregator and functions modules. Output saturated to ±1.
SOutput = sat(functions:AF(signal_aggregator:AggrF(Ordered_SIAcc,SI_PIdPs)),?OUTPUT_SAT_LIMIT),
%io:format("SOutput:~p~n",[SOutput]),
Output_PIds = S#state.output_pids,
[Output_PId ! {self(),forward,[SOutput]} || Output_PId <- Output_PIds],
case PFName of
none ->
% No plasticity: state unchanged.
U_S=S;
_ ->%io:format("MIAcc:~p, S:~p~n",[MIAcc,S]),
% Modulatory signals gate learning: their dot product, squashed by
% tanh and saturated at ?SAT_LIMIT, is prepended to the plasticity
% function's parameters.
Ordered_MIAcc = lists:reverse(MIAcc),
MI_PIdPs = S#state.mi_pidps_current,
MAggregation_Product = signal_aggregator:dot_product(Ordered_MIAcc,MI_PIdPs),
MOutput = sat(functions:tanh(MAggregation_Product),?SAT_LIMIT),
U_SI_PIdPs = plasticity:PFName([MOutput|PFParameters],Ordered_SIAcc,SI_PIdPs,SOutput),
%io:format("U_SI_PIdPs:~p~n",[U_SI_PIdPs]),
U_S=S#state{
si_pidps_current = U_SI_PIdPs
}
end,
% Recurse with freshly reset expected-input lists and empty accumulators.
SI_PIds = S#state.si_pids,
MI_PIds = S#state.mi_pids,
neuron:loop(U_S,ExoSelf_PId,SI_PIds,MI_PIds,[],[]);
%% Second clause: gathering phase. Only the signal from the pid at the HEAD
%% of each expected list is accepted (selective receive), so inputs are
%% consumed in the fixed order established in prep/1. Control messages from
%% the exoself are handled at any point during gathering.
loop(S,ExoSelf_PId,[SI_PId|SI_PIds],[MI_PId|MI_PIds],SIAcc,MIAcc)->
receive
{SI_PId,forward,Input}->
%io:format("Id:~p Input:~p~n",[S#state.id,Input]),
loop(S,ExoSelf_PId,SI_PIds,[MI_PId|MI_PIds],[{SI_PId,Input}|SIAcc],MIAcc);
{MI_PId,forward,Input}->
loop(S,ExoSelf_PId,[SI_PId|SI_PIds],MI_PIds,SIAcc,[{MI_PId,Input}|MIAcc]);
{ExoSelf_PId,weight_backup}->
% darwinian: back up the baseline (pre-tuning) synaptic weights, so
% learned within-lifetime changes are NOT inherited.
% lamarckian: back up the current (learned) synaptic weights.
% Modulatory weights and the plasticity function are backed up from
% 'current' in both cases.
U_S=case S#state.heredity_type of
darwinian ->
S#state{
si_pidps_backup=S#state.si_pidps_bl,
mi_pidps_backup=S#state.mi_pidps_current,
pf_backup=S#state.pf_current
};
lamarckian ->
S#state{
si_pidps_backup=S#state.si_pidps_current,
mi_pidps_backup=S#state.mi_pidps_current,
pf_backup=S#state.pf_current
}
end,
loop(U_S,ExoSelf_PId,[SI_PId|SI_PIds],[MI_PId|MI_PIds],SIAcc,MIAcc);
{ExoSelf_PId,weight_restore}->
% Roll back to the last backed-up weights and plasticity function.
U_S = S#state{
si_pidps_bl=S#state.si_pidps_backup,
si_pidps_current=S#state.si_pidps_backup,
mi_pidps_current=S#state.mi_pidps_backup,
pf_current=S#state.pf_backup
},
loop(U_S,ExoSelf_PId,[SI_PId|SI_PIds],[MI_PId|MI_PIds],SIAcc,MIAcc);
{ExoSelf_PId,weight_perturb,Spread}->
% Perturb from the BACKUP copies, so repeated perturbations explore
% around the last saved point rather than compounding.
Perturbed_SIPIdPs=perturb_IPIdPs(Spread,S#state.si_pidps_backup),
Perturbed_MIPIdPs=perturb_IPIdPs(Spread,S#state.mi_pidps_backup),
Perturbed_PF=perturb_PF(Spread,S#state.pf_backup),
U_S=S#state{
si_pidps_bl=Perturbed_SIPIdPs,
si_pidps_current=Perturbed_SIPIdPs,
mi_pidps_current=Perturbed_MIPIdPs,
pf_current=Perturbed_PF
},
loop(U_S,ExoSelf_PId,[SI_PId|SI_PIds],[MI_PId|MI_PIds],SIAcc,MIAcc);
{ExoSelf_PId,reset_prep}->
% Drop any in-flight (possibly recursive) signals, tell the exoself
% we are ready, then wait for 'reset' before re-priming recursive
% outputs and restarting a clean gathering cycle.
neuron:flush_buffer(),
ExoSelf_PId ! {self(),ready},
RO_PIds = S#state.ro_pids,
receive
{ExoSelf_PId, reset}->
fanout(RO_PIds,{self(),forward,[?RO_SIGNAL]})
end,
loop(S,ExoSelf_PId,S#state.si_pids,S#state.mi_pids,[],[]);
{ExoSelf_PId,get_backup}->
% Report the backed-up genotype-relevant data to the exoself.
NId = S#state.id,
ExoSelf_PId ! {self(),NId,S#state.si_pidps_backup,S#state.mi_pidps_backup,S#state.pf_backup},
loop(S,ExoSelf_PId,[SI_PId|SI_PIds],[MI_PId|MI_PIds],SIAcc,MIAcc);
{ExoSelf_PId,terminate}->
%io:format("Neuron:~p is terminating.~n",[self()])
ok
%after 10000 ->
%io:format("neuron:~p stuck.~n",[S#state.id])
end.
%The neuron process waits for vector signals from all the processes that it's connected from, taking the dot product of the input and weight vectors, and then adding it to the accumulator. Once all the signals from Input_PIds are received, the accumulator contains the dot product to which the neuron then adds the bias and executes the activation function. After fanning out the output signal, the neuron again returns to waiting for incoming signals. When the neuron receives the {ExoSelf_PId,get_backup} message, it forwards to the exoself its full MInput_PIdPs list, and its Id. The MInput_PIdPs contains the modified, tuned and most effective version of the input_idps. The neuron process also accepts the weight_backup signal; on receiving it, the neuron saves the current MInput_PIdPs into the backup fields of its state record. When the neuron receives the weight_restore signal, it reads back the stored Input_PIdPs from those backup fields, and switches over to using it as its active Input_PIdPs list. When the neuron receives the weight_perturb signal from the exoself, it perturbs the weights by executing the perturb_IPIdPs/2 function, which returns the updated list. Finally, the neuron can also accept a reset_prep signal, which makes the neuron flush its buffer in the off chance that it has a recursively sent signal in its inbox. After flushing its buffer, the neuron waits for the exoself to send it the reset signal, at which point the neuron, now fully refreshed after the flush_buffer/0, outputs a default forward signal to its recursively connected elements, if any, and then drops back into the main loop.
%% fanout/2: send Msg to every pid in the list, in list order.
%% Always returns true (preserving the original contract; callers ignore
%% the return value).
fanout(PIds,Msg)->
	[PId ! Msg || PId <- PIds],
	true.
%% flush_buffer/0: drain every message currently queued in this process's
%% mailbox, returning 'done' once it is empty. Used before a network reset
%% to discard stale recursive signals.
flush_buffer()->
	receive
		_Discarded -> flush_buffer()
	after 0 ->
		done
	end.
%% perturb_IPIdPs/2: perturb a subset of the {InputPId,WeightsP} blocks.
%% Each block is selected for perturbation with probability MP =
%% 1/sqrt(number of blocks); a selected block has its weights perturbed by
%% perturb_weightsP/4 with a per-weight probability of
%% 1/sqrt(number of weights in that block). Returns the list in original
%% order. The empty list yields [].
%% FIX: first clause used 'Spread' without referencing it, producing an
%% unused-variable compiler warning; renamed to _Spread.
perturb_IPIdPs(_Spread,[])->[];
perturb_IPIdPs(Spread,Input_PIdPs)->
	%Tot_Weights=lists:sum([length(WeightsP) || {_Input_PId,WeightsP}<-Input_PIdPs]),
	%MP = 1/math:sqrt(Tot_Weights),
	MP = 1/math:sqrt(length(Input_PIdPs)),
	perturb_IPIdPs(Spread,MP,Input_PIdPs,[]).
%% perturb_IPIdPs/4: worker — walks the blocks, accumulating results
%% head-first and reversing at the end (O(n)).
perturb_IPIdPs(Spread,MP,[{Input_PId,WeightsP}|Input_PIdPs],Acc)->
	%MP = 1/math:sqrt(length(WeightsP)),
	U_WeightsP = case random:uniform() < MP of
		true ->
			perturb_weightsP(Spread,1/math:sqrt(length(WeightsP)),WeightsP,[]);
		false ->
			WeightsP
	end,
	perturb_IPIdPs(Spread,MP,Input_PIdPs,[{Input_PId,U_WeightsP}|Acc]);
perturb_IPIdPs(_Spread,_MP,[],Acc)->
	lists:reverse(Acc).
%% perturb_weightsP/4: walk a weight block, independently perturbing each
%% weight with probability MP. A chosen weight is shifted by a delta drawn
%% uniformly from (-Spread/2, Spread/2) and saturated to ±?SAT_LIMIT; the
%% attached learning parameters (LPs) are carried through untouched.
%% Returns the weights in original order.
perturb_weightsP(Spread,MP,[{Weight,LPs}|Rest],Acc)->
	%io:format("Spread:~p~n",[Spread]),
	NewWeight =
		case random:uniform() < MP of
			true ->
				Delta = (random:uniform()-0.5)*Spread,
				%io:format("self:~p DW:~p~n",[Delta,self()]),
				sat(Weight+Delta,-?SAT_LIMIT,?SAT_LIMIT);
			false ->
				Weight
		end,
	perturb_weightsP(Spread,MP,Rest,[{NewWeight,LPs}|Acc]);
perturb_weightsP(_Spread,_MP,[],Acc)->
	lists:reverse(Acc).
%% sat/2: clamp Val to the symmetric interval [-|Limit|, +|Limit|].
sat(Val,Limit)->
	sat(Val,-abs(Limit),abs(Limit)).
%% sat/3: clamp Val to [Min, Max]. Checks the lower bound first, matching
%% the original's evaluation order.
sat(Val,Min,_Max) when Val < Min -> Min;
sat(Val,_Min,Max) when Val > Max -> Max;
sat(Val,_Min,_Max) -> Val.
%% perturb_PF/2: perturb every plasticity-function parameter by a uniform
%% delta in (-Spread/2, Spread/2), saturated to ±?SAT_LIMIT. Returns the
%% plasticity function with the UPDATED parameter list.
%% BUG FIX: the original computed U_PFParameters but returned the untouched
%% PFParameters, silently making parameter perturbation a no-op.
perturb_PF(Spread,{PFName,PFParameters})->
	U_PFParameters = [sat(PFParameter+(random:uniform()-0.5)*Spread,-?SAT_LIMIT,?SAT_LIMIT)||PFParameter<-PFParameters],
	{PFName,U_PFParameters}.