-
Notifications
You must be signed in to change notification settings - Fork 12
/
frem.html
385 lines (347 loc) · 19.1 KB
/
frem.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta property="og:title" content="2.3. FREM: fast ensembling of regularized models for robust decoding" />
<meta property="og:type" content="website" />
<meta property="og:url" content="https://nilearn.github.io/decoding/frem.html" />
<meta property="og:site_name" content="Nilearn" />
<meta property="og:description" content="FREM uses an implicit spatial regularization through fast clustering and aggregates a high number of estimators trained on various splits of the training set, thus returning a very robust decoder a..." />
<meta property="og:image" content="../_images/sphx_glr_plot_haxby_frem_001.png" />
<meta property="og:image:alt" content="Nilearn" />
<title>Nilearn: Statistical Analysis for NeuroImaging in Python — Machine learning for NeuroImaging</title>
<link rel="stylesheet" type="text/css" href="../_static/pygments.css" />
<link rel="stylesheet" type="text/css" href="../_static/nature.css" />
<link rel="stylesheet" type="text/css" href="../_static/copybutton.css" />
<link rel="stylesheet" type="text/css" href="../_static/sg_gallery.css" />
<link rel="stylesheet" type="text/css" href="../_static/sg_gallery-binder.css" />
<link rel="stylesheet" type="text/css" href="../_static/sg_gallery-dataframe.css" />
<link rel="stylesheet" type="text/css" href="../_static/sg_gallery-rendered-html.css" />
<script data-url_root="../" id="documentation_options" src="../_static/documentation_options.js"></script>
<script src="../_static/jquery.js"></script>
<script src="../_static/underscore.js"></script>
<script src="../_static/doctools.js"></script>
<script src="../_static/clipboard.min.js"></script>
<script src="../_static/copybutton.js"></script>
<link rel="shortcut icon" href="../_static/favicon.ico"/>
<link rel="search" title="Search" href="../search.html" />
<link rel="next" title="2.4. SpaceNet: decoding with spatial structure for better maps" href="space_net.html" />
<link rel="prev" title="2.2. Choosing the right predictive model for neuroimaging" href="estimator_choice.html" />
<meta content="True" name="HandheldFriendly">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="keywords" content="nilearn, neuroimaging, python, neuroscience, machinelearning">
<script type="text/javascript">
function updateTopMenuPosition(height, width) {
if($(window).scrollTop() > height && $(window).outerWidth() > 1024) {
//begin to scroll
$('.related-wrapper').css("z-index", 1000);
$('.related-wrapper').css("position", "sticky");
$('.related-wrapper').css("top", 0);
$('.related-wrapper').css("width", width)
} else {
//lock it back into place
$('.related-wrapper').css("position", "relative");
$('.related-wrapper').css("top", 0)
}
}
$(function() {
var banner_height = $('#logo-banner').outerHeight();
var banner_width = $('#logo-banner').outerWidth();
var width = $('.related-wrapper').css("height", $('.related').outerHeight());
updateTopMenuPosition(banner_height, width);
$(window).scroll(function(event) {
updateTopMenuPosition(banner_height, width)
});
$(window).resize(function(event) {
var banner_width = $('#logo-banner').outerWidth();
var menu_height = $('.related').outerHeight();
$('.related').css("width", banner_width);
$('.related-wrapper').css("height", menu_height);
updateTopMenuPosition(banner_height, width)
})
});
</script>
<script type="text/javascript">
function updateSideBarPosition(top, offset, sections) {
var pos = $(window).scrollTop();
// Lock the table of content to a fixed position once we scroll enough
var topShift = 2 * offset;
if(pos > top + topShift + 1) {
// begin to scroll with sticky menu bar
var topShift = -topShift + 1;
if ($(window).outerWidth() < 1024) {
// compensate top menu that disappears
topShift -= offset + 1
}
$('.sphinxsidebarwrapper').css("position", "fixed");
$('.sphinxsidebarwrapper').css("top", topShift)
}
else {
//lock it back into place
$('.sphinxsidebarwrapper').css("position", "relative");
$('.sphinxsidebarwrapper').css("top",0)
}
// Highlight the current section
i = 0;
current_section = 0;
$('a.internal').removeClass('active');
for(i in sections) {
if(sections[i] > pos) {
break
}
if($('a.internal[href$="' + i + '"]').is(':visible')){
current_section = i
}
}
$('a.internal[href$="' + current_section + '"]').addClass('active');
$('a.internal[href$="' + current_section + '"]').parent().addClass('active')
}
$(function () {
// Lock the table of content to a fixed position once we scroll enough
var tocOffset = $('.related-wrapper').outerHeight();
var marginTop = parseFloat($('.sphinxsidebarwrapper').css('margin-top').replace(/auto/, 0));
var top = $('.sphinxsidebarwrapper').offset().top - marginTop;
sections = {};
url = document.URL.replace(/#.*$/, "");
// Grab positions of our sections
$('.headerlink').each(function(){
sections[this.href.replace(url, '')] = $(this).offset().top - 50
});
updateSideBarPosition(top, tocOffset, sections);
$(window).scroll(function(event) {
updateSideBarPosition(top, tocOffset, sections)
});
$(window).resize(function(event) {
tocOffset = $('.related-wrapper').outerHeight();
updateSideBarPosition(top, tocOffset, sections)
});
});
</script>
<script type="text/javascript">
// Google Analytics (legacy ga.js) page-view tracking for this site.
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-41920728-1']);
_gaq.push(['_trackPageview']);
(function() {
// Standard async snippet: inject the ga.js loader, matching the page's
// protocol (https vs http) so it never triggers mixed-content warnings.
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
// Insert before the first existing <script> so loading does not block parsing.
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
</head><body>
<div id="logo-banner">
<div class="logo">
<a href="../index.html">
<img src="../_static/nilearn-logo.png" alt="Nilearn logo" border="0" />
</a>
</div>
<!-- A tag cloud to make it easy for people to find what they are
looking for -->
<div class="tags">
<ul>
<li>
<big><a href="../auto_examples/decoding/plot_haxby_anova_svm.html">SVM</a></big>
</li>
<li>
<small><a href="../connectivity/parcellating.html">Ward
clustering</a></small>
</li>
<li>
<a href="searchlight.html">Searchlight</a>
</li>
<li>
<big><a href="../connectivity/resting_state_networks.html">ICA</a></big>
</li>
<li>
<a href="../manipulating_images/data_preparation.html">Nifti IO</a>
</li>
<li>
<a href="../modules/reference.html#module-nilearn.datasets">Datasets</a>
</li>
</ul>
</div>
<div class="banner">
<h1>Nilearn:</h1>
<h2>Statistics for NeuroImaging in Python</h2>
</div>
<div class="search_form">
<div class="gcse-search" id="cse" style="width: 100%;"></div>
<script>
(function() {
var cx = '017289614950330089114:elrt9qoutrq';
var gcse = document.createElement('script');
gcse.type = 'text/javascript';
gcse.async = true;
gcse.src = 'https://cse.google.com/cse.js?cx=' + cx;
var s = document.getElementsByTagName('script')[0];
s.parentNode.insertBefore(gcse, s);
})();
</script>
</div>
</div>
<div class=related-wrapper>
<div class="related" role="navigation" aria-label="related navigation">
<h3>Navigation</h3>
<ul>
<li class="right" style="margin-right: 10px">
<a href="../py-modindex.html" title="Python Module Index"
>modules</a></li>
<li class="right" >
<a href="space_net.html" title="2.4. SpaceNet: decoding with spatial structure for better maps"
accesskey="N">next</a> |</li>
<li class="right" >
<a href="estimator_choice.html" title="2.2. Choosing the right predictive model for neuroimaging"
accesskey="P">previous</a> |</li>
<li><a href="../index.html">Nilearn Home</a> | </li>
<li><a href="../user_guide.html">User Guide</a> | </li>
<li><a href="../auto_examples/index.html">Examples</a> | </li>
<li><a href="../modules/reference.html">Reference</a> | </li>
<li id="navbar-about"><a href="../authors.html">About</a> | </li>
<li><a href="../glossary.html">Glossary</a> | </li>
<li><a href="../bibliography.html">Bibliography</a> | </li>
<li id="navbar-ecosystem"><a href="http://www.nipy.org/">Nipy ecosystem</a></li>
<li class="nav-item nav-item-1"><a href="../user_guide.html" >User guide: table of contents</a> »</li>
<li class="nav-item nav-item-2"><a href="index.html" accesskey="U"><span class="section-number">2. </span>Decoding and MVPA: predicting from brain images</a> »</li>
<li class="nav-item nav-item-this"><a href="">Nilearn: Statistical Analysis for NeuroImaging in Python</a></li>
</ul>
</div>
</div>
<div class="stable-banner">
This is the <em>stable</em> documentation for the latest release of Nilearn,
the current development version is available <a href="https://nilearn.github.io/dev/index.html">here</a>.
</div>
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body" role="main">
<div class="section" id="frem-fast-ensembling-of-regularized-models-for-robust-decoding">
<span id="frem"></span><h1><span class="section-number">2.3. </span>FREM: fast ensembling of regularized models for robust decoding<a class="headerlink" href="#frem-fast-ensembling-of-regularized-models-for-robust-decoding" title="Permalink to this headline">¶</a></h1>
<p><a class="reference internal" href="../glossary.html#term-FREM"><span class="xref std std-term">FREM</span></a> uses an implicit spatial regularization through fast clustering and aggregates a high number of estimators trained on various splits of the training set, thus returning a very robust decoder at a lower computational cost than other spatially regularized methods. Its performance compared to usual classifiers was studied on several datasets in [Hoyos-Idrobo <em>et al.</em> <a class="footnote-reference brackets" href="#hoyosidrobo2018160" id="id1">1</a>].</p>
<div class="section" id="frem-pipeline">
<h2><span class="section-number">2.3.1. </span>FREM pipeline<a class="headerlink" href="#frem-pipeline" title="Permalink to this headline">¶</a></h2>
<p><a class="reference internal" href="../glossary.html#term-FREM"><span class="xref std std-term">FREM</span></a> pipeline averages the coefficients of many models, each trained on a
different split of the training data. For each split:</p>
<blockquote>
<div><ul class="simple">
<li><p>aggregate similar <a class="reference internal" href="../glossary.html#term-voxel"><span class="xref std std-term">voxels</span></a> together to reduce the number of features (and the
computational complexity of the decoding problem). The <a class="reference internal" href="../glossary.html#term-ReNA"><span class="xref std std-term">ReNA</span></a> algorithm is used at this
step, usually to reduce the number of <a class="reference internal" href="../glossary.html#term-voxel"><span class="xref std std-term">voxels</span></a> by a factor of 10.</p></li>
<li><p>optional: apply feature selection, a univariate statistical test on clusters
to keep only the ones most informative for predicting the variable of interest,
further lowering the problem complexity.</p></li>
<li><p>find the best hyper-parameter and memorize the coefficients of this model</p></li>
</ul>
</div></blockquote>
<p>Then this ensemble model is used for prediction, usually yielding better and more stable predictions than a unique model at no extra cost. Also, the resulting coefficient maps tend to be more structured.</p>
<p>There are two objects to apply <a class="reference internal" href="../glossary.html#term-FREM"><span class="xref std std-term">FREM</span></a> in Nilearn:</p>
<blockquote>
<div><ul class="simple">
<li><p><a class="reference internal" href="../modules/generated/nilearn.decoding.FREMClassifier.html#nilearn.decoding.FREMClassifier" title="nilearn.decoding.FREMClassifier"><code class="xref py py-class docutils literal notranslate"><span class="pre">nilearn.decoding.FREMClassifier</span></code></a> to predict categories</p></li>
<li><p><a class="reference internal" href="../modules/generated/nilearn.decoding.FREMRegressor.html#nilearn.decoding.FREMRegressor" title="nilearn.decoding.FREMRegressor"><code class="xref py py-class docutils literal notranslate"><span class="pre">nilearn.decoding.FREMRegressor</span></code></a> to predict continuous values (age, gain / loss…)</p></li>
</ul>
</div></blockquote>
<p>They can use different types of models (l2-SVM, l1-SVM, Logistic, Ridge) through the parameter ‘estimator’.</p>
</div>
<div class="section" id="empirical-comparisons">
<h2><span class="section-number">2.3.2. </span>Empirical comparisons<a class="headerlink" href="#empirical-comparisons" title="Permalink to this headline">¶</a></h2>
<div class="section" id="decoding-performance-increase-on-haxby-dataset">
<h3><span class="section-number">2.3.2.1. </span>Decoding performance increase on Haxby dataset<a class="headerlink" href="#decoding-performance-increase-on-haxby-dataset" title="Permalink to this headline">¶</a></h3>
<div class="figure align-default">
<img alt="../_images/sphx_glr_plot_haxby_frem_001.png" src="../_images/sphx_glr_plot_haxby_frem_001.png" />
</div>
<p>In this example we showcase the use of <a class="reference internal" href="../glossary.html#term-FREM"><span class="xref std std-term">FREM</span></a> and the performance increase that
it brings on this problem.</p>
<div class="topic">
<p class="topic-title"><strong>Code</strong></p>
<p>The complete script can be found
<a class="reference internal" href="../auto_examples/02_decoding/plot_haxby_frem.html#sphx-glr-auto-examples-02-decoding-plot-haxby-frem-py"><span class="std std-ref">here</span></a>.</p>
</div>
</div>
<div class="section" id="spatial-regularization-of-decoding-maps-on-mixed-gambles-study">
<h3><span class="section-number">2.3.2.2. </span>Spatial regularization of decoding maps on mixed gambles study<a class="headerlink" href="#spatial-regularization-of-decoding-maps-on-mixed-gambles-study" title="Permalink to this headline">¶</a></h3>
<div class="figure align-default">
<img alt="../_images/sphx_glr_plot_mixed_gambles_frem_001.png" src="../_images/sphx_glr_plot_mixed_gambles_frem_001.png" />
</div>
<div class="topic">
<p class="topic-title"><strong>Code</strong></p>
<p>The complete script can be found
<a class="reference internal" href="../auto_examples/02_decoding/plot_mixed_gambles_frem.html#sphx-glr-auto-examples-02-decoding-plot-mixed-gambles-frem-py"><span class="std std-ref">here</span></a>.</p>
</div>
<div class="admonition seealso">
<p class="admonition-title">See also</p>
<ul class="simple">
<li><p>The <a class="reference external" href="http://scikit-learn.org">scikit-learn documentation</a>
has very detailed explanations on a large variety of estimators and
machine learning techniques. To become better at decoding, you need
to study it.</p></li>
<li><p><a class="reference internal" href="space_net.html#space-net"><span class="std std-ref">SpaceNet</span></a>, a method promoting sparsity that can also
give good brain decoding power and improved decoder maps when sparsity
is important.</p></li>
</ul>
</div>
</div>
</div>
<div class="section" id="references">
<h2><span class="section-number">2.3.3. </span>References<a class="headerlink" href="#references" title="Permalink to this headline">¶</a></h2>
<dl class="footnote brackets">
<dt class="label" id="hoyosidrobo2018160"><span class="brackets"><a class="fn-backref" href="#id1">1</a></span></dt>
<dd><p>Andrés Hoyos-Idrobo, Gaël Varoquaux, Yannick Schwartz, and Bertrand Thirion. Frem – scalable and stable decoding with fast regularized ensemble of models. <em>NeuroImage</em>, 180:160–172, 2018. New advances in encoding and decoding of brain signals. URL: <a class="reference external" href="https://www.sciencedirect.com/science/article/pii/S1053811917308182">https://www.sciencedirect.com/science/article/pii/S1053811917308182</a>, <a class="reference external" href="https://doi.org/10.1016/j.neuroimage.2017.10.005">doi:10.1016/j.neuroimage.2017.10.005</a>.</p>
</dd>
</dl>
</div>
</div>
<div class="clearer"></div>
</div>
</div>
</div>
<div class="sphinxsidebar" role="navigation" aria-label="main navigation">
<div class="sphinxsidebarwrapper">
<h4> Giving credit </h4>
<ul class="simple">
<li><p>Please consider <a href="../authors.html#citing">citing the
papers</a>.</p></li>
</ul>
<h3><a href="../index.html">Table of Contents</a></h3>
<ul>
<li><a class="reference internal" href="#">2.3. FREM: fast ensembling of regularized models for robust decoding</a><ul>
<li><a class="reference internal" href="#frem-pipeline">2.3.1. FREM pipeline</a></li>
<li><a class="reference internal" href="#empirical-comparisons">2.3.2. Empirical comparisons</a><ul>
<li><a class="reference internal" href="#decoding-performance-increase-on-haxby-dataset">2.3.2.1. Decoding performance increase on Haxby dataset</a></li>
<li><a class="reference internal" href="#spatial-regularization-of-decoding-maps-on-mixed-gambles-study">2.3.2.2. Spatial regularization of decoding maps on mixed gambles study</a></li>
</ul>
</li>
<li><a class="reference internal" href="#references">2.3.3. References</a></li>
</ul>
</li>
</ul>
<h4>Previous topic</h4>
<p class="topless"><a href="estimator_choice.html"
title="previous chapter"><span class="section-number">2.2. </span>Choosing the right predictive model for neuroimaging</a></p>
<h4>Next topic</h4>
<p class="topless"><a href="space_net.html"
title="next chapter"><span class="section-number">2.4. </span>SpaceNet: decoding with spatial structure for better maps</a></p>
<div id="searchbox" style="display: none" role="search">
<h3 id="searchlabel">Quick search</h3>
<div class="searchformwrapper">
<form class="search" action="../search.html" method="get">
<input type="text" name="q" aria-labelledby="searchlabel" />
<input type="submit" value="Go" />
</form>
</div>
</div>
<script>$('#searchbox').show(0);</script> <!-- searchbox is hidden by default; reveal it once JS is known to run -->
</div>
</div>
<div class="clearer"></div>
</div>
<div class="footer">
© The nilearn developers 2010-2022.
Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 4.0.2.
<span style="padding-left: 5ex;">
<a href="../_sources/decoding/frem.rst.txt"
rel="nofollow">Show this page source</a>
</span>
</div>
</body>
</html>