-
Notifications
You must be signed in to change notification settings - Fork 8
/
def_params.go
193 lines (181 loc) · 7.87 KB
/
def_params.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
// Copyright (c) 2020, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"github.com/emer/emergent/v2/netparams"
"github.com/emer/emergent/v2/params"
)
// ParamSets is the default set of parameters -- Base is always applied, and others can be optionally
// selected to apply on top of that.
// Sel strings use CSS-like selectors (per the emergent params package): "#Name" targets one
// specific layer/path by name, ".Class" targets all objects carrying that class, and a bare
// type name (e.g. "Path") targets every object of that type. Entries are applied in order,
// so later selectors can override earlier ones.
var ParamSets = netparams.Sets{
"Base": {
// {Sel: "Path", Desc: "basic path params",
// Params: params.Params{
// "Path.Learn.LRate.Base": "0.4",
// }},
{Sel: ".InhibLateral", Desc: "circle lateral inhibitory connection -- good params, longer time, more ABmem",
Params: params.Params{
"Path.Learn.Learn": "false", // ??? not sure -- NOTE(review): confirm lateral inhib should stay non-learning
// "Path.SWts.Init.Mean": "1", // 0.1 was the standard Grid model as of 02242023
"Path.SWts.Init.Var": "0",
"Path.SWts.Init.Sym": "false",
"Path.PathScale.Abs": "0.1", // lower is better for spiking model?
}},
// {Sel: ".EcCa1Path", Desc: "encoder pathways -- Abs only affecting ec3toca1 and ec5toca1, not ca1toec5",
// Params: params.Params{
// "Path.PathScale.Abs": "0.1", // as low as 0.3 helped hugely preventing CA1 fixation, even 0.1 works -- try each one of them separately
// "Path.Learn.LRate.Base": "0.2",
// }},
{Sel: ".HippoCHL", Desc: "hippo CHL pathways -- no norm, moment, but YES wtbal = sig better",
Params: params.Params{
"Path.Learn.Learn": "true",
// "Path.CHL.Hebb": "0.01", // .01 > .05? > .1?
"Path.Learn.LRate.Base": "0.2", // .2
}},
{Sel: ".PPath", Desc: "performant path, new Dg error-driven EcCa1Path paths",
Params: params.Params{
// "Path.PathScale.Abs": "0.8", // 0.8 helps preventing CA3 fixation
"Path.Learn.Learn": "true",
"Path.Learn.LRate.Base": "0.2", // err driven: .15 > .2 > .25 > .1
}},
{Sel: "#CA1ToEC5", Desc: "extra strong from CA1 to EC5",
Params: params.Params{
"Path.PathScale.Abs": "3.0", // 4 > 6 > 2 (fails)
"Path.Learn.LRate.Base": "0.4", // ABmem slightly impaired compared to 0.2 but faster
}},
{Sel: "#InputToEC2", Desc: "for CAN ec2",
Params: params.Params{
"Path.PathScale.Rel": "2.0", // 2 vs. 1: memory much better, FirstPerfect generally longer
"Path.Learn.Learn": "false", // no learning better
}},
{Sel: "#InputToEC3", Desc: "one-to-one input to EC",
Params: params.Params{
"Path.Learn.Learn": "false",
"Path.SWts.Init.Mean": "0.8", // high fixed mean suits one-to-one deterministic wiring
"Path.SWts.Init.Var": "0.0",
}},
{Sel: "#EC3ToEC2", Desc: "copied from InputToEC2",
Params: params.Params{
"Path.Learn.Learn": "false", // no learning better
//"Path.Learn.LRate.Base": "0.01",
//"Path.SWts.Init.Mean": "0.8", // 0.8 is for one to one deterministic connections, not for learning!
//"Path.SWts.Init.Var": "0",
"Path.PathScale.Abs": "0.5", // was 1, lower better
}},
{Sel: "#EC5ToEC3", Desc: "one-to-one out to in",
Params: params.Params{
"Path.Learn.Learn": "false",
"Path.SWts.Init.Mean": "0.9",
"Path.SWts.Init.Var": "0.01",
"Path.PathScale.Rel": "0.5", // was 0.5
}},
{Sel: "#DGToCA3", Desc: "Mossy fibers: strong, non-learning",
Params: params.Params{
"Path.Learn.Learn": "false", // learning here definitely does NOT work!
// "Path.SWts.Init.Mean": "0.9", // commenting this out prevents CA3 overactivation
"Path.SWts.Init.Var": "0.01",
"Path.PathScale.Rel": "4", // err del 4: 4 > 6 > 8
"Path.PathScale.Abs": "0.3",
}},
// {Sel: "#EC2ToCA3", Desc: "EC2 Perforant Path",
// Params: params.Params{
// // "Path.PathScale.Abs": "2",
// "Path.Learn.LRate.Base": "0.4", // list150: 0.2 > 0.3 > 0.1 > 0.05 > 0.01
// }},
{Sel: "#CA3ToCA3", Desc: "CA3 recurrent cons: rel=2 still the best",
Params: params.Params{
"Path.PathScale.Abs": "0.3",
"Path.PathScale.Rel": "2", // 2 > 1 > .5 = .1
// "Path.Learn.LRate.Base": "0.4", // .1 > .08 (close) > .15 > .2 > .04; large list size: 0.01>0.1~=0.04
}},
{Sel: "#EC2ToDG", Desc: "DG learning is surprisingly critical: maxed out fast, hebbian works best",
Params: params.Params{
// "Path.Hip.Hebb": "0.2",
// "Path.Hip.Err": "0.8",
// "Path.Hip.SAvgCor": "0.1",
// "Path.Hip.SNominal": "0.02", // !! need to keep it the same as actual layer Nominal
"Path.Learn.Learn": "true", // absolutely essential to have on! learning slow if off. key for NoDGLearn
"Path.PathScale.Abs": "0.7",
"Path.Learn.LRate.Base": "0.2",
}},
{Sel: "#CA3ToCA1", Desc: "Schaffer collaterals -- slower, less hebb",
Params: params.Params{
// "Path.PathScale.Abs": "1.5",
// "Path.Hip.Hebb": "0.01", // worked whole 300 epcs!
// "Path.Hip.Err": "0.9",
// "Path.Hip.Hebb": "0",
// "Path.Hip.Err": "1",
// "Path.SWts.Adapt.SigGain": "1",
// "Path.SWts.Init.SPct": "0",
// "Path.Learn.Trace.SubMean": "1", // prediction: zero-sum at LWt level makes more fixation
// "Path.PathScale.Abs": "0.1",
// "Path.Hip.SAvgCor": "0.4",
// "Path.Hip.SNominal": "0.03", // !! need to keep it the same as actual layer Nominal
"Path.Learn.LRate.Base": "0.2", // CHL: .1 =~ .08 > .15 > .2, .05 (sig worse)
}},
// {Sel: "#EC3ToCA1", Desc: "EC3 Perforant Path",
// Params: params.Params{
// "Path.PathScale.Abs": "0.1",
// // "Path.SWts.Adapt.SigGain": "1", // if 1, Wt = LWt, weight more linear less extreme, if 6 (default), Wt = sigmoid(LWt)
// }},
{Sel: "#EC5ToCA1", Desc: "EC5 Perforant Path",
Params: params.Params{
"Path.PathScale.Rel": "0.3", // Back proj should generally be very weak but we're specifically setting this here bc others are set already
}},
{Sel: ".EC", Desc: "all EC layers: only pools, no layer-level -- now for EC3 and EC5",
Params: params.Params{
// "Layer.Inhib.ActAvg.Nominal": "0.2",
// "Layer.Inhib.Layer.On": "false",
// "Layer.Inhib.Layer.Gi": "0.2", // weak just to keep it from blowing up
// "Layer.Inhib.Pool.Gi": "1.1",
// "Layer.Inhib.Pool.On": "true",
// "Layer.Act.Gbar.L": "0.1",
"Layer.Inhib.ActAvg.Nominal": "0.05",
"Layer.Inhib.Layer.On": "false", // pool-level inhibition only, no whole-layer inhibition
"Layer.Inhib.Pool.On": "true",
"Layer.Inhib.Pool.Gi": "1.1",
"Layer.Acts.Clamp.Ge": "1.4",
// "Layer.Learn.TrgAvgAct.SubMean": "0",
"Layer.Learn.TrgAvgAct.SynScaleRate": "0.0002",
}},
{Sel: "#DG", Desc: "very sparse = high inhibition",
Params: params.Params{
"Layer.Inhib.ActAvg.Nominal": "0.01",
"Layer.Inhib.Layer.Gi": "2.4",
// "Layer.Learn.TrgAvgAct.SubMean": "0",
"Layer.Learn.TrgAvgAct.SynScaleRate": "0.0002",
// "Layer.Inhib.Layer.FB": "4",
// "Layer.Learn.RLRate.SigmoidMin": "0.01",
}},
{Sel: "#EC2", Desc: "very sparse = high inhibition",
Params: params.Params{
"Layer.Inhib.ActAvg.Nominal": "0.02",
"Layer.Inhib.Layer.Gi": "1.2",
"Layer.Learn.TrgAvgAct.SynScaleRate": "0.0002",
// "Layer.Inhib.Layer.FB": "4",
// "Layer.Learn.RLRate.SigmoidMin": "0.01",
}},
{Sel: "#CA3", Desc: "sparse = high inhibition",
Params: params.Params{
"Layer.Inhib.ActAvg.Nominal": "0.01",
"Layer.Inhib.Layer.Gi": "1.2",
// "Layer.Learn.TrgAvgAct.SubMean": "0",
"Layer.Learn.TrgAvgAct.SynScaleRate": "0.0002",
// "Layer.Inhib.Layer.FB": "4",
// "Layer.Learn.RLRate.SigmoidMin": "0.01",
}},
{Sel: "#CA1", Desc: "CA1 only Pools",
Params: params.Params{
"Layer.Inhib.ActAvg.Nominal": "0.03",
"Layer.Inhib.Layer.On": "false", // like .EC: pool-level inhibition only
"Layer.Inhib.Pool.On": "true",
"Layer.Inhib.Pool.Gi": "1.1",
// "Layer.Learn.TrgAvgAct.SubMean": "0",
// "Layer.Learn.TrgAvgAct.On": "false",
"Layer.Learn.TrgAvgAct.SynScaleRate": "0.0002",
// "Layer.Inhib.Pool.FB": "4",
// "Layer.Learn.RLRate.SigmoidMin": "0.01",
}},
},
}