td.go
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package rl

import (
	"log"

	"github.com/chewxy/math32"

	"github.com/PrincetonCompMemLab/neurodiff_leabra/deep"
	"github.com/PrincetonCompMemLab/neurodiff_leabra/leabra"
	"github.com/goki/ki/kit"
)
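
// How the pieces below fit together (informal summary): RewPred learns
// V, the expected discounted future reward.  In the minus phase, RewInteg
// reflects V(t) (RewPred's prior plus-phase activation); in the plus
// phase it computes r(t) + Discount*V(t+1).  TDDaLayer then takes the
// plus - minus difference, yielding the standard TD error:
//
//	delta = r(t) + gamma*V(t+1) - V(t)
//
// with gamma = RewInteg.Discount.  This delta is broadcast as DA via
// SendDA and drives learning in TDRewPredPrjn.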

// TDRewPredLayer is the temporal differences reward prediction layer.
// It represents estimated value V(t) in the minus phase, and computes
// estimated V(t+1) based on its learned weights in the plus phase.
// Use TDRewPredPrjn for DA-modulated learning.
type TDRewPredLayer struct {
	leabra.Layer
	DA float32 `inactive:"+" desc:"dopamine value for this layer"`
}

var KiT_TDRewPredLayer = kit.Types.AddType(&TDRewPredLayer{}, leabra.LayerProps)

// DALayer interface:

func (ly *TDRewPredLayer) GetDA() float32   { return ly.DA }
func (ly *TDRewPredLayer) SetDA(da float32) { ly.DA = da }

// ActFmG computes linear activation for TDRewPred
func (ly *TDRewPredLayer) ActFmG(ltime *leabra.Time) {
	for ni := range ly.Neurons {
		nrn := &ly.Neurons[ni]
		if nrn.IsOff() {
			continue
		}
		if ltime.Quarter == 3 { // plus phase
			nrn.Act = nrn.Ge // linear
		} else {
			nrn.Act = nrn.ActP // previous actP
		}
	}
}
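
// A note on timing: Quarter == 3 above is the final, plus-phase quarter
// of leabra's standard four-quarter alpha cycle (quarters are 0-indexed);
// during the earlier minus-phase quarters the layer simply holds its
// previous plus-phase activation (ActP), which stands for V(t).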

//////////////////////////////////////////////////////////////////////////////////////
//  TDRewIntegLayer

// TDRewIntegParams are params for the reward integrator layer
type TDRewIntegParams struct {
	Discount float32 `desc:"discount factor -- how much to discount the future prediction from RewPred"`
	RewPred  string  `desc:"name of TDRewPredLayer to get reward prediction from"`
}

func (tp *TDRewIntegParams) Defaults() {
	tp.Discount = 0.9
	if tp.RewPred == "" {
		tp.RewPred = "RewPred"
	}
}

// TDRewIntegLayer is the temporal differences reward integration layer.
// It represents estimated value V(t) in the minus phase, and
// estimated V(t+1) + r(t) in the plus phase.
// It computes r(t) via (typically fixed) weights from a reward layer,
// and reads V values directly from the RewPred layer.
type TDRewIntegLayer struct {
	leabra.Layer
	RewInteg TDRewIntegParams `desc:"parameters for reward integration"`
	DA       float32          `desc:"dopamine value for this layer"`
}

var KiT_TDRewIntegLayer = kit.Types.AddType(&TDRewIntegLayer{}, leabra.LayerProps)

func (ly *TDRewIntegLayer) Defaults() {
	ly.Layer.Defaults()
	ly.RewInteg.Defaults()
}

// DALayer interface:

func (ly *TDRewIntegLayer) GetDA() float32   { return ly.DA }
func (ly *TDRewIntegLayer) SetDA(da float32) { ly.DA = da }

// RewPredLayer returns the TDRewPredLayer named in RewInteg.RewPred,
// logging an error if it cannot be found.
func (ly *TDRewIntegLayer) RewPredLayer() (*TDRewPredLayer, error) {
	tly, err := ly.Network.LayerByNameTry(ly.RewInteg.RewPred)
	if err != nil {
		log.Printf("TDRewIntegLayer %s RewPredLayer: %v\n", ly.Name(), err)
		return nil, err
	}
	return tly.(*TDRewPredLayer), nil
}

// Build constructs the layer state, including calling Build on the projections.
func (ly *TDRewIntegLayer) Build() error {
	err := ly.Layer.Build()
	if err != nil {
		return err
	}
	_, err = ly.RewPredLayer()
	return err
}

// ActFmG computes the integrated activation: V(t) from RewPred in the
// minus phase, and r(t) (from Ge) + Discount * V(t+1) in the plus phase.
func (ly *TDRewIntegLayer) ActFmG(ltime *leabra.Time) {
	rply, _ := ly.RewPredLayer()
	if rply == nil {
		return
	}
	rpActP := rply.Neurons[0].ActP
	rpAct := rply.Neurons[0].Act
	for ni := range ly.Neurons {
		nrn := &ly.Neurons[ni]
		if nrn.IsOff() {
			continue
		}
		if ltime.Quarter == 3 { // plus phase
			nrn.Act = nrn.Ge + ly.RewInteg.Discount*rpAct
		} else {
			nrn.Act = rpActP // previous actP
		}
	}
}
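
// For example, with the default Discount of 0.9, a reward input of
// Ge = 1, and a RewPred plus-phase activation (V(t+1)) of 0.5, the
// plus-phase activation here is 1 + 0.9*0.5 = 1.45.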

//////////////////////////////////////////////////////////////////////////////////////
//  TDDaLayer

// TDDaLayer computes a dopamine (DA) signal as the temporal difference (TD)
// between the TDRewIntegLayer activations in the minus and plus phases.
type TDDaLayer struct {
	leabra.Layer
	SendDA   SendDA  `desc:"list of layers to send dopamine to"`
	RewInteg string  `desc:"name of TDRewIntegLayer from which this computes the temporal difference"`
	DA       float32 `desc:"dopamine value for this layer"`
}

var KiT_TDDaLayer = kit.Types.AddType(&TDDaLayer{}, deep.LayerProps)

func (ly *TDDaLayer) Defaults() {
	ly.Layer.Defaults()
	if ly.RewInteg == "" {
		ly.RewInteg = "RewInteg"
	}
}

// DALayer interface:

func (ly *TDDaLayer) GetDA() float32   { return ly.DA }
func (ly *TDDaLayer) SetDA(da float32) { ly.DA = da }

// RewIntegLayer returns the TDRewIntegLayer named in RewInteg,
// logging an error if it cannot be found.
func (ly *TDDaLayer) RewIntegLayer() (*TDRewIntegLayer, error) {
	tly, err := ly.Network.LayerByNameTry(ly.RewInteg)
	if err != nil {
		log.Printf("TDDaLayer %s RewIntegLayer: %v\n", ly.Name(), err)
		return nil, err
	}
	return tly.(*TDRewIntegLayer), nil
}

// Build constructs the layer state, including calling Build on the projections.
func (ly *TDDaLayer) Build() error {
	err := ly.Layer.Build()
	if err != nil {
		return err
	}
	err = ly.SendDA.Validate(ly.Network, ly.Name()+" SendTo list")
	if err != nil {
		return err
	}
	_, err = ly.RewIntegLayer()
	return err
}

// ActFmG computes the TD error as the difference between the plus-phase
// (Act) and minus-phase (ActM) activations of the RewInteg layer.
func (ly *TDDaLayer) ActFmG(ltime *leabra.Time) {
	rily, _ := ly.RewIntegLayer()
	if rily == nil {
		return
	}
	rpActP := rily.Neurons[0].Act
	rpActM := rily.Neurons[0].ActM
	da := rpActP - rpActM
	for ni := range ly.Neurons {
		nrn := &ly.Neurons[ni]
		if nrn.IsOff() {
			continue
		}
		if ltime.Quarter == 3 { // plus phase
			nrn.Act = da
		} else {
			nrn.Act = 0
		}
	}
}

// CyclePost is called at the end of Cycle.
// We use it to send DA, which will then be active for the next cycle of processing.
func (ly *TDDaLayer) CyclePost(ltime *leabra.Time) {
	act := ly.Neurons[0].Act
	ly.DA = act
	ly.SendDA.SendDA(ly.Network, act)
}
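
// Continuing the example above: if RewInteg's plus-phase activation is
// 1.45 and its minus-phase activation (V(t)) was 0.5, then the DA signal
// sent here is 1.45 - 0.5 = 0.95, a positive TD error.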

//////////////////////////////////////////////////////////////////////////////////////
//  TDRewPredPrjn

// TDRewPredPrjn does dopamine-modulated learning for reward prediction:
// DWt = Da * Send.ActQ0 (activity on *previous* timestep).
// Use in TDRewPredLayer typically to generate reward predictions.
// Has no weight bounds or limits on sign etc.
type TDRewPredPrjn struct {
	deep.Prjn
}

var KiT_TDRewPredPrjn = kit.Types.AddType(&TDRewPredPrjn{}, deep.PrjnProps)

func (pj *TDRewPredPrjn) Defaults() {
	pj.Prjn.Defaults()
	// no additional factors
	pj.Learn.WtSig.Gain = 1
	pj.Learn.Norm.On = false
	pj.Learn.Momentum.On = false
	pj.Learn.WtBal.On = false
}

// DWt computes the weight change (learning) -- on sending projections.
func (pj *TDRewPredPrjn) DWt() {
	if !pj.Learn.Learn {
		return
	}
	slay := pj.Send.(leabra.LeabraLayer).AsLeabra()
	// rlay := pj.Recv.(leabra.LeabraLayer).AsLeabra()
	da := pj.Recv.(DALayer).GetDA()
	for si := range slay.Neurons {
		sn := &slay.Neurons[si]
		nc := int(pj.SConN[si])
		st := int(pj.SConIdxSt[si])
		syns := pj.Syns[st : st+nc]
		// scons := pj.SConIdx[st : st+nc]
		for ci := range syns {
			sy := &syns[ci]
			// ri := scons[ci]
			dwt := da * sn.ActQ0 // no recv unit activation, prior trial act
			norm := float32(1)
			if pj.Learn.Norm.On {
				norm = pj.Learn.Norm.NormFmAbsDWt(&sy.Norm, math32.Abs(dwt))
			}
			if pj.Learn.Momentum.On {
				dwt = norm * pj.Learn.Momentum.MomentFmDWt(&sy.Moment, dwt)
			} else {
				dwt *= norm
			}
			sy.DWt += pj.Learn.Lrate * dwt
		}
		// aggregate max DWtNorm over sending synapses
		if pj.Learn.Norm.On {
			maxNorm := float32(0)
			for ci := range syns {
				sy := &syns[ci]
				if sy.Norm > maxNorm {
					maxNorm = sy.Norm
				}
			}
			for ci := range syns {
				sy := &syns[ci]
				sy.Norm = maxNorm
			}
		}
	}
}
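
// With the Defaults above (Norm, Momentum, and WtBal all off), the rule
// reduces to DWt += Lrate * da * ActQ0.  For instance, with da = 0.95,
// ActQ0 = 1, and an illustrative Lrate of 0.04 (the actual value comes
// from the standard leabra learning params), the weight change is
// 0.04 * 0.95 = 0.038.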

// WtFmDWt updates the synaptic weight values from delta-weight changes -- on sending projections.
func (pj *TDRewPredPrjn) WtFmDWt() {
	if !pj.Learn.Learn {
		return
	}
	for si := range pj.Syns {
		sy := &pj.Syns[si]
		if sy.DWt != 0 {
			sy.Wt += sy.DWt // straight update, no limits or anything
			sy.LWt = sy.Wt
			sy.DWt = 0
		}
	}
}
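
// Typical usage (an informal sketch; only the types and fields defined
// in this file are assumed):
//
//	* a TDRewPredLayer named "RewPred", receiving a TDRewPredPrjn from
//	  the state/input layer, learns V
//	* a TDRewIntegLayer named "RewInteg", with RewInteg.RewPred =
//	  "RewPred" and fixed weights from a reward layer providing r(t)
//	* a TDDaLayer with RewInteg = "RewInteg" and SendDA listing
//	  "RewPred", so the TD error modulates reward-prediction learning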