evoked.py
"""Functions to plot evoked M/EEG data (besides topographies)."""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Cathy Nangini <cnangini@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
from functools import partial
from copy import deepcopy
from numbers import Integral
import numpy as np
from ..io.pick import (channel_type, pick_types, _picks_by_type,
_pick_data_channels, _VALID_CHANNEL_TYPES)
from ..externals.six import string_types
from ..defaults import _handle_default
from .utils import (_draw_proj_checkbox, tight_layout, _check_delayed_ssp,
plt_show, _process_times, DraggableColorbar, _setup_cmap,
_setup_vmin_vmax)
from ..utils import logger, _clean_names, warn, _pl
from ..io.pick import pick_info
from .topo import _plot_evoked_topo
from .topomap import (_prepare_topo_plot, plot_topomap, _check_outlines,
_draw_outlines, _prepare_topomap, _topomap_animation,
_set_contour_locator)
from ..channels import find_layout
from ..channels.layout import _pair_grad_sensors
def _butterfly_onpick(event, params):
"""Add a channel name on click."""
params['need_draw'] = True
ax = event.artist.axes
ax_idx = np.where([ax is a for a in params['axes']])[0]
if len(ax_idx) == 0: # this can happen if ax param is used
return # let the other axes handle it
else:
ax_idx = ax_idx[0]
lidx = np.where([l is event.artist for l in params['lines'][ax_idx]])[0][0]
ch_name = params['ch_names'][params['idxs'][ax_idx][lidx]]
text = params['texts'][ax_idx]
x = event.artist.get_xdata()[event.ind[0]]
y = event.artist.get_ydata()[event.ind[0]]
text.set_x(x)
text.set_y(y)
text.set_text(ch_name)
text.set_color(event.artist.get_color())
text.set_alpha(1.)
text.set_zorder(len(ax.lines)) # to make sure it goes on top of the lines
text.set_path_effects(params['path_effects'])
# do NOT redraw here, since for butterfly plots hundreds of lines could
# potentially be picked -- use on_button_press (happens once per click)
# to do the drawing
def _butterfly_on_button_press(event, params):
"""Only draw once for picking."""
if params['need_draw']:
event.canvas.draw()
else:
idx = np.where([event.inaxes is ax for ax in params['axes']])[0]
if len(idx) == 1:
text = params['texts'][idx[0]]
text.set_alpha(0.)
text.set_path_effects([])
event.canvas.draw()
params['need_draw'] = False
def _line_plot_onselect(xmin, xmax, ch_types, info, data, times, text=None,
psd=False):
"""Draw topomaps from the selected area."""
import matplotlib.pyplot as plt
ch_types = [type_ for type_ in ch_types if type_ in ('eeg', 'grad', 'mag')]
if len(ch_types) == 0:
raise ValueError('Interactive topomaps only allowed for EEG '
'and MEG channels.')
if ('grad' in ch_types and
len(_pair_grad_sensors(info, topomap_coords=False,
raise_error=False)) < 2):
ch_types.remove('grad')
if len(ch_types) == 0:
return
vert_lines = list()
if text is not None:
text.set_visible(True)
ax = text.axes
vert_lines.append(ax.axvline(xmin, zorder=0, color='red'))
vert_lines.append(ax.axvline(xmax, zorder=0, color='red'))
fill = ax.axvspan(xmin, xmax, alpha=0.2, color='green')
evoked_fig = plt.gcf()
evoked_fig.canvas.draw()
evoked_fig.canvas.flush_events()
minidx = np.abs(times - xmin).argmin()
maxidx = np.abs(times - xmax).argmin()
fig, axarr = plt.subplots(1, len(ch_types), squeeze=False,
figsize=(3 * len(ch_types), 3))
for idx, ch_type in enumerate(ch_types):
if ch_type not in ('eeg', 'grad', 'mag'):
continue
picks, pos, merge_grads, _, ch_type = _prepare_topo_plot(
info, ch_type, layout=None)
this_data = data[picks, minidx:maxidx]
if merge_grads:
from ..channels.layout import _merge_grad_data
method = 'mean' if psd else 'rms'
this_data = _merge_grad_data(this_data, method=method)
title = '%s %s' % (ch_type, method.upper())
else:
title = ch_type
this_data = np.average(this_data, axis=1)
axarr[0][idx].set_title(title)
vmin = min(this_data) if psd else None
vmax = max(this_data) if psd else None # All negative for dB psd.
cmap = 'Reds' if psd else None
plot_topomap(this_data, pos, cmap=cmap, vmin=vmin, vmax=vmax,
axes=axarr[0][idx], show=False)
unit = 'Hz' if psd else 'ms'
fig.suptitle('Average over %.2f%s - %.2f%s' % (xmin, unit, xmax, unit),
y=0.1)
tight_layout(pad=2.0, fig=fig)
plt_show()
if text is not None:
text.set_visible(False)
close_callback = partial(_topo_closed, ax=ax, lines=vert_lines,
fill=fill)
fig.canvas.mpl_connect('close_event', close_callback)
evoked_fig.canvas.draw()
evoked_fig.canvas.flush_events()
def _topo_closed(events, ax, lines, fill):
"""Remove lines from evoked plot as topomap is closed."""
for line in lines:
ax.lines.remove(line)
ax.patches.remove(fill)
ax.get_figure().canvas.draw()
def _rgb(x, y, z):
"""Transform x, y, z values into RGB colors."""
rgb = np.array([x, y, z]).T
rgb -= rgb.min(0)
rgb /= rgb.max(0)
return rgb
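# A small worked example of the mapping above (the coordinates are
# illustrative, not real sensor positions): for x = [0., 1., 2.],
# y = [0., 2., 1.], z = [0., 1., 2.] the stacked rows are (0, 0, 0),
# (1, 2, 1) and (2, 1, 2); subtracting each column's minimum (0) and
# dividing by its maximum (2) gives RGB triples (0, 0, 0), (0.5, 1, 0.5)
# and (1, 0.5, 1), so spatially close sensors get similar colors.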
def _plot_legend(pos, colors, axis, bads, outlines, loc):
"""Plot color/channel legends for butterfly plots with spatial colors."""
from mpl_toolkits.axes_grid.inset_locator import inset_axes
bbox = axis.get_window_extent() # Determine the correct size.
ratio = bbox.width / bbox.height
ax = inset_axes(axis, width=str(30 / ratio) + '%', height='30%', loc=loc)
pos_x, pos_y = _prepare_topomap(pos, ax)
ax.scatter(pos_x, pos_y, color=colors, s=25, marker='.', zorder=1)
for idx in bads:
ax.scatter(pos_x[idx], pos_y[idx], s=5, marker='.', color='w',
zorder=1)
if isinstance(outlines, dict):
_draw_outlines(ax, outlines)
def _plot_evoked(evoked, picks, exclude, unit, show, ylim, proj, xlim, hline,
units, scalings, titles, axes, plot_type, cmap=None,
gfp=False, window_title=None, spatial_colors=False,
set_tight_layout=True, selectable=True, zorder='unsorted'):
"""Aux function for plot_evoked and plot_evoked_image (cf. docstrings).
Extra param is:
plot_type : str, value ('butterfly' | 'image')
The type of graph to plot: 'butterfly' plots each channel as a line
(x axis: time, y axis: amplitude). 'image' plots a 2D image where
color depicts the amplitude of each channel at a given time point
(x axis: time, y axis: channel). In 'image' mode, the plot is not
interactive.
"""
import matplotlib.pyplot as plt
info = evoked.info
if axes is not None and proj == 'interactive':
raise RuntimeError('Currently only single axis figures are supported'
' for interactive SSP selection.')
if isinstance(gfp, string_types) and gfp != 'only':
raise ValueError('gfp must be boolean or "only". Got %s' % gfp)
scalings = _handle_default('scalings', scalings)
titles = _handle_default('titles', titles)
units = _handle_default('units', units)
if picks is None:
picks = list(range(info['nchan']))
bad_ch_idx = [info['ch_names'].index(ch) for ch in info['bads']
if ch in info['ch_names']]
if len(exclude) > 0:
if isinstance(exclude, string_types) and exclude == 'bads':
exclude = bad_ch_idx
elif (isinstance(exclude, list) and
all(isinstance(ch, string_types) for ch in exclude)):
exclude = [info['ch_names'].index(ch) for ch in exclude]
else:
raise ValueError('exclude has to be a list of channel names or '
'"bads"')
picks = list(set(picks).difference(exclude))
picks = np.array(picks)
types = np.array([channel_type(info, idx) for idx in picks])
ch_types_used = list()
for this_type in _VALID_CHANNEL_TYPES:
if this_type in types:
ch_types_used.append(this_type)
fig = None
if axes is None:
fig, axes = plt.subplots(len(ch_types_used), 1)
plt.subplots_adjust(0.175, 0.08, 0.94, 0.94, 0.2, 0.63)
if isinstance(axes, plt.Axes):
axes = [axes]
fig.set_size_inches(6.4, 2 + len(axes))
if isinstance(axes, plt.Axes):
axes = [axes]
elif isinstance(axes, np.ndarray):
axes = list(axes)
if fig is None:
fig = axes[0].get_figure()
if window_title is not None:
fig.canvas.set_window_title(window_title)
if len(axes) != len(ch_types_used):
raise ValueError('Number of axes (%g) must match number of channel '
'types (%d: %s)' % (len(axes), len(ch_types_used),
sorted(ch_types_used)))
# instead of projecting during each iteration let's use the mixin here.
if proj is True and evoked.proj is not True:
evoked = evoked.copy()
evoked.apply_proj()
if plot_type == 'butterfly':
times = evoked.times * 1e3 # time in milliseconds
_plot_lines(evoked.data, info, picks, fig, axes, spatial_colors, unit,
units, scalings, hline, gfp, types, zorder, xlim, ylim,
times, bad_ch_idx, titles, ch_types_used, selectable,
False, line_alpha=1.)
for ax in axes:
ax.set_xlabel('time (ms)')
elif plot_type == 'image':
for ax, this_type in zip(axes, ch_types_used):
this_picks = list(picks[types == this_type])
_plot_image(evoked.data, ax, this_type, this_picks, cmap, unit,
units, scalings, evoked.times, xlim, ylim, titles)
if proj == 'interactive':
_check_delayed_ssp(evoked)
params = dict(evoked=evoked, fig=fig, projs=info['projs'], axes=axes,
types=types, units=units, scalings=scalings, unit=unit,
ch_types_used=ch_types_used, picks=picks,
plot_update_proj_callback=_plot_update_evoked,
plot_type=plot_type)
_draw_proj_checkbox(None, params)
for ax in fig.axes[:-1]:
ax.set_xlabel('')
fig.canvas.draw() # for axes plots update axes.
if set_tight_layout:
tight_layout(fig=fig)
plt_show(show)
return fig
def _plot_lines(data, info, picks, fig, axes, spatial_colors, unit, units,
scalings, hline, gfp, types, zorder, xlim, ylim, times,
bad_ch_idx, titles, ch_types_used, selectable, psd,
line_alpha):
"""Plot data as butterfly plot."""
from matplotlib import patheffects
from matplotlib.widgets import SpanSelector
texts = list()
idxs = list()
lines = list()
path_effects = [patheffects.withStroke(linewidth=2, foreground="w",
alpha=0.75)]
gfp_path_effects = [patheffects.withStroke(linewidth=5, foreground="w",
alpha=0.75)]
# Parameters for butterfly interactive plots
params = dict(axes=axes, texts=texts, lines=lines,
ch_names=info['ch_names'], idxs=idxs, need_draw=False,
path_effects=path_effects)
fig.canvas.mpl_connect('pick_event',
partial(_butterfly_onpick, params=params))
fig.canvas.mpl_connect('button_press_event',
partial(_butterfly_on_button_press,
params=params))
for ax, this_type in zip(axes, ch_types_used):
line_list = list() # 'line_list' contains the lines for this axes
ch_unit = units[this_type]
this_scaling = 1. if scalings is None else scalings[this_type]
if unit is False:
this_scaling = 1.0
ch_unit = 'NA' # no unit
idx = list(picks[types == this_type])
idxs.append(idx)
if len(idx) > 0:
# Set amplitude scaling
D = this_scaling * data[idx, :]
gfp_only = (isinstance(gfp, string_types) and gfp == 'only')
if not gfp_only:
chs = [info['chs'][i] for i in idx]
locs3d = np.array([ch['loc'][:3] for ch in chs])
if spatial_colors is True and (locs3d == 0).all():
warn('Channel locations not available. Disabling spatial '
'colors.')
spatial_colors = selectable = False
if spatial_colors is True and len(idx) != 1:
colors = _handle_spatial_colors(locs3d, info, idx,
this_type, psd, ax)
else:
if isinstance(spatial_colors, (tuple, string_types)):
col = [spatial_colors]
else:
col = ['k']
colors = col * len(idx)
for i in bad_ch_idx:
if i in idx:
colors[idx.index(i)] = 'r'
if zorder == 'std':
# find the channels with the least activity
# to map them in front of the more active ones
z_ord = D.std(axis=1).argsort()
elif zorder == 'unsorted':
z_ord = list(range(D.shape[0]))
elif not callable(zorder):
error = ('`zorder` must be a function, "std" '
'or "unsorted", not {0}.')
raise TypeError(error.format(type(zorder)))
else:
z_ord = zorder(D)
# plot channels
for ch_idx, z in enumerate(z_ord):
line_list.append(
ax.plot(times, D[ch_idx], picker=3.,
zorder=z + 1 if spatial_colors is True else 1,
color=colors[ch_idx], alpha=line_alpha,
linewidth=0.5)[0])
if gfp: # 'only' or boolean True
gfp_color = 3 * (0.,) if spatial_colors is True else (0., 1.,
0.)
this_gfp = np.sqrt((D * D).mean(axis=0))
this_ylim = ax.get_ylim() if (ylim is None or this_type not in
ylim.keys()) else ylim[this_type]
if gfp_only:
y_offset = 0.
else:
y_offset = this_ylim[0]
this_gfp += y_offset
ax.fill_between(times, y_offset, this_gfp, color='none',
facecolor=gfp_color, zorder=1, alpha=0.25)
line_list.append(ax.plot(times, this_gfp, color=gfp_color,
zorder=3, alpha=line_alpha)[0])
ax.text(times[0] + 0.01 * (times[-1] - times[0]),
this_gfp[0] + 0.05 * np.diff(ax.get_ylim())[0],
'GFP', zorder=4, color=gfp_color,
path_effects=gfp_path_effects)
for ii, line in zip(idx, line_list):
if ii in bad_ch_idx:
line.set_zorder(2)
if spatial_colors is True:
line.set_linestyle("--")
ax.set_ylabel(ch_unit)
# for old matplotlib, we actually need this to have a bounding
# box (!), so we have to put some valid text here, change
# alpha and path effects later
texts.append(ax.text(0, 0, 'blank', zorder=3,
verticalalignment='baseline',
horizontalalignment='left',
fontweight='bold', alpha=0))
if xlim is not None:
if xlim == 'tight':
xlim = (times[0], times[-1])
ax.set_xlim(xlim)
if ylim is not None and this_type in ylim:
ax.set_ylim(ylim[this_type])
ax.set_title(titles[this_type] + ' (%d channel%s)' % (len(D),
_pl(D)))
if hline is not None:
for h in hline:
c = ('grey' if spatial_colors is True else 'r')
ax.axhline(h, linestyle='--', linewidth=2, color=c)
lines.append(line_list)
if selectable:
import matplotlib.pyplot as plt
for ax in axes:
if len(ax.lines) == 1:
continue
text = ax.annotate('Loading...', xy=(0.01, 0.1),
xycoords='axes fraction', fontsize=20,
color='green', zorder=3)
text.set_visible(False)
callback_onselect = partial(_line_plot_onselect,
ch_types=ch_types_used, info=info,
data=data, times=times, text=text,
psd=psd)
blit = False if plt.get_backend() == 'MacOSX' else True
minspan = 0 if len(times) < 2 else times[1] - times[0]
ax._span_selector = SpanSelector(
ax, callback_onselect, 'horizontal', minspan=minspan,
useblit=blit, rectprops=dict(alpha=0.5, facecolor='red'))
def _handle_spatial_colors(locs3d, info, idx, ch_type, psd, ax):
"""Set up spatial colors."""
x, y, z = locs3d.T
colors = _rgb(x, y, z)
ch_type = None if ch_type not in ('meg', 'mag', 'grad', 'eeg') else ch_type
layout = find_layout(info, ch_type=ch_type, exclude=[])
if layout.kind == 'custom':
head_pos = {'center': (0, 0), 'scale': (4.5, 4.5)}
outlines = np.array([0.5, 0.5])
else:
head_pos = None
outlines = 'skirt'
# drop channels that are not in the data
used_nm = np.array(_clean_names(info['ch_names']))[idx]
names = np.asarray([name for name in used_nm if name in layout.names])
name_idx = [layout.names.index(name) for name in names]
# find indices for bads
bads = [np.where(names == bad)[0][0] for bad in info['bads'] if bad in
names]
pos, outlines = _check_outlines(layout.pos[:, :2], outlines, head_pos)
pos = pos[name_idx]
loc = 1 if psd else 2 # Legend in top right for psd plot.
_plot_legend(pos, colors, ax, bads, outlines, loc)
return colors
def _plot_image(data, ax, this_type, picks, cmap, unit, units, scalings, times,
xlim, ylim, titles):
"""Plot images."""
import matplotlib.pyplot as plt
cmap = _setup_cmap(cmap)
ch_unit = units[this_type]
this_scaling = scalings[this_type]
if unit is False:
this_scaling = 1.0
ch_unit = 'NA' # no unit
# Set amplitude scaling
data = this_scaling * data[picks, :]
im = ax.imshow(data, interpolation='nearest', origin='lower',
extent=[times[0], times[-1], 0, data.shape[0]],
aspect='auto', cmap=cmap[0])
if xlim is not None:
if xlim == 'tight':
xlim = (times[0], times[-1])
ax.set_xlim(xlim)
if ylim is not None and this_type in ylim:
im.set_clim(ylim[this_type])
cbar = plt.colorbar(im, ax=ax)
cbar.ax.set_title(ch_unit)
if cmap[1]:
ax.CB = DraggableColorbar(cbar, im)
ax.set_ylabel('channels (index)')
ax.set_title(titles[this_type] + ' (%d channel%s)' % (
len(data), _pl(data)))
ax.set_xlabel('time (ms)')
def plot_evoked(evoked, picks=None, exclude='bads', unit=True, show=True,
ylim=None, xlim='tight', proj=False, hline=None, units=None,
scalings=None, titles=None, axes=None, gfp=False,
window_title=None, spatial_colors=False, zorder='unsorted',
selectable=True):
"""Plot evoked data using butteryfly plots.
Left click to a line shows the channel name. Selecting an area by clicking
and holding left mouse button plots a topographic map of the painted area.
.. note:: If bad channels are not excluded they are shown in red.
Parameters
----------
evoked : instance of Evoked
The evoked data
picks : array-like of int | None
The indices of channels to plot. If None show all.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
Show figure if True.
ylim : dict | None
ylim for plots (after scaling has been applied). e.g.
ylim = dict(eeg=[-20, 20])
Valid keys are eeg, mag, grad, misc. If None, the ylim parameter
for each channel equals the pyplot default.
xlim : 'tight' | tuple | None
xlim for plots.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
hline : list of floats | None
The values at which to show horizontal lines.
units : dict | None
The units of the channel types used for axes labels. If None,
defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If None,
defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
titles : dict | None
The titles associated with the channels. If None, defaults to
`dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
gfp : bool | 'only'
Plot GFP in green if True or "only". If "only", then the individual
channel traces will not be shown.
window_title : str | None
The title to put at the top of the figure.
spatial_colors : bool
If True, the lines are color coded by mapping physical sensor
coordinates into color values. Spatially similar channels will have
similar colors. Bad channels will be dotted. If False, the good
channels are plotted black and bad channels red. Defaults to False.
zorder : str | callable
Which channels to put in the front or back. Only matters if
`spatial_colors` is used.
If str, must be `std` or `unsorted` (defaults to `unsorted`). If
`std`, data with the lowest standard deviation (weakest effects) will
be put in front so that they are not obscured by those with stronger
effects. If `unsorted`, channels are z-sorted as in the evoked
instance.
If callable, it must take one argument (a numpy array of the same
dimensionality as the evoked data) and return a list of unique
integers, one per channel.
.. versionadded:: 0.13.0
selectable : bool
Whether to use interactive features. If True (default), it is possible
to paint an area to draw topomaps. When False, the interactive features
are disabled. Disabling interactive features reduces memory consumption
and is useful when using the ``axes`` parameter to draw multi-axes figures.
.. versionadded:: 0.13.0
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure containing the butterfly plots.
"""
return _plot_evoked(evoked=evoked, picks=picks, exclude=exclude, unit=unit,
show=show, ylim=ylim, proj=proj, xlim=xlim,
hline=hline, units=units, scalings=scalings,
titles=titles, axes=axes, plot_type="butterfly",
gfp=gfp, window_title=window_title,
spatial_colors=spatial_colors, zorder=zorder,
selectable=selectable)
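# A minimal usage sketch (illustrative; assumes an Evoked instance loaded
# e.g. with ``mne.read_evokeds``, and a hypothetical ``zorder_by_amplitude``
# helper that follows the documented contract: take the data array and
# return one unique integer per channel):
#
#     import mne
#     evoked = mne.read_evokeds('sample-ave.fif', condition=0)  # path is illustrative
#
#     def zorder_by_amplitude(data):
#         # draw channels with the largest peak-to-peak amplitude on top
#         return list(np.argsort(np.ptp(data, axis=1)))
#
#     fig = plot_evoked(evoked, gfp=True, spatial_colors=True,
#                       ylim=dict(eeg=[-20, 20]), zorder=zorder_by_amplitude)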
def plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None,
border='none', ylim=None, scalings=None, title=None,
proj=False, vline=[0.0], fig_facecolor='k',
fig_background=None, axis_facecolor='k', font_color='w',
merge_grads=False, legend=True, show=True):
"""Plot 2D topography of evoked responses.
Clicking on the plot of an individual sensor opens a new figure showing
the evoked response for the selected sensor.
Parameters
----------
evoked : list of Evoked | Evoked
The evoked response to plot.
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
color : list of color objects | color object | None
Everything matplotlib accepts to specify colors. If not list-like,
the color specified will be repeated. If None, colors are
automatically drawn.
border : str
matplotlib borders style to be used for each sensor plot.
ylim : dict | None
ylim for plots (after scaling has been applied). The value
determines the upper and lower subplot limits. e.g.
ylim = dict(eeg=[-20, 20]). Valid keys are eeg, mag, grad, misc.
If None, the ylim parameter for each channel is determined by
the maximum absolute peak.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If None,
defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
title : str
Title of the figure.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
vline : list of floats | None
The values at which to show a vertical line.
fig_facecolor : str | obj
The figure face color. Defaults to black.
fig_background : None | numpy ndarray
A background image for the figure. This must work with a call to
plt.imshow. Defaults to None.
axis_facecolor : str | obj
The face color to be used for each sensor plot. Defaults to black.
font_color : str | obj
The color of text in the colorbar and title. Defaults to white.
merge_grads : bool
Whether to use RMS value of gradiometer pairs. Only works for Neuromag
data. Defaults to False.
legend : bool | int | string | tuple
If True, create a legend based on evoked.comment. If False, disable the
legend. Otherwise, the legend is created and the parameter value is
passed as the location parameter to the matplotlib legend call. It can
be an integer (e.g. 0 corresponds to upper right corner of the plot),
a string (e.g. 'upper right'), or a tuple (x, y coordinates of the
lower left corner of the legend in the axes coordinate system).
See matplotlib documentation for more details.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations.
"""
return _plot_evoked_topo(evoked=evoked, layout=layout,
layout_scale=layout_scale, color=color,
border=border, ylim=ylim, scalings=scalings,
title=title, proj=proj, vline=vline,
fig_facecolor=fig_facecolor,
fig_background=fig_background,
axis_facecolor=axis_facecolor,
font_color=font_color, merge_grads=merge_grads,
legend=legend, show=show)
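# A usage sketch (illustrative; ``evoked_l`` and ``evoked_r`` stand for two
# conditions from the same recording). Passing a list of Evoked objects
# overlays them per sensor, and ``legend`` labels them by ``evoked.comment``:
#
#     fig = plot_evoked_topo([evoked_l, evoked_r], color=['yellow', 'cyan'],
#                            vline=[0.0], legend='upper right')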
def _animate_evoked_topomap(evoked, ch_type='mag', times=None, frame_rate=None,
butterfly=False, blit=True, show=True):
"""Make animation of evoked data as topomap timeseries.
The animation can be paused/resumed with left mouse button.
Left and right arrow keys can be used to move backward or forward in
time.
Parameters
----------
evoked : instance of Evoked
The evoked data.
ch_type : str | None
Channel type to plot. Accepted data types: 'mag', 'grad', 'eeg'.
If None, first available channel type from ('mag', 'grad', 'eeg') is
used. Defaults to 'mag'.
times : array of floats | None
The time points to plot. If None, 10 evenly spaced samples are
calculated over the evoked time series. Defaults to None.
frame_rate : int | None
Frame rate for the animation in Hz. If None, frame rate = sfreq / 10.
Defaults to None.
butterfly : bool
Whether to plot the data as butterfly plot under the topomap.
Defaults to False.
blit : bool
Whether to use blit to optimize drawing. In general, it is recommended
to use blit in combination with ``show=True``. If you intend to save
the animation it is better to disable blit. Defaults to True.
show : bool
Whether to show the animation. Defaults to True.
Returns
-------
fig : instance of matplotlib figure
The figure.
anim : instance of matplotlib FuncAnimation
Animation of the topomap.
Notes
-----
.. versionadded:: 0.12.0
"""
return _topomap_animation(evoked, ch_type=ch_type, times=times,
frame_rate=frame_rate, butterfly=butterfly,
blit=blit, show=show)
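# A usage sketch (illustrative; this private helper is normally reached
# through ``Evoked.animate_topomap``, assuming an Evoked instance named
# ``evoked``). ``blit=False`` follows the note above about saving:
#
#     fig, anim = evoked.animate_topomap(ch_type='mag', frame_rate=5,
#                                        blit=False)
#     anim.save('topomap.gif', writer='imagemagick')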
def plot_evoked_image(evoked, picks=None, exclude='bads', unit=True, show=True,
clim=None, xlim='tight', proj=False, units=None,
scalings=None, titles=None, axes=None, cmap='RdBu_r'):
"""Plot evoked data as images.
Parameters
----------
evoked : instance of Evoked
The evoked data
picks : array-like of int | None
The indices of channels to plot. If None show all.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
Show figure if True.
clim : dict | None
clim for plots (after scaling has been applied). e.g.
clim = dict(eeg=[-20, 20])
Valid keys are eeg, mag, grad, misc. If None, the clim parameter
for each channel equals the pyplot default.
xlim : 'tight' | tuple | None
xlim for plots.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
units : dict | None
The units of the channel types used for axes labels. If None,
defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If None,
defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
titles : dict | None
The titles associated with the channels. If None, defaults to
`dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
cmap : matplotlib colormap | (colormap, bool) | 'interactive'
Colormap. If tuple, the first value indicates the colormap to use and
the second value is a boolean defining interactivity. In interactive
mode the colors are adjustable by clicking and dragging the colorbar
with left and right mouse button. Left mouse button moves the scale up
and down and right mouse button adjusts the range. Hitting space bar
resets the scale. Up and down arrows can be used to change the
colormap. If 'interactive', translates to ('RdBu_r', True). Defaults to
'RdBu_r'.
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure containing the images.
"""
return _plot_evoked(evoked=evoked, picks=picks, exclude=exclude, unit=unit,
show=show, ylim=clim, proj=proj, xlim=xlim,
hline=None, units=units, scalings=scalings,
titles=titles, axes=axes, plot_type="image",
cmap=cmap)
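# A usage sketch (illustrative, assuming ``evoked`` is an Evoked instance).
# Passing a ``(colormap, True)`` tuple makes the colorbar draggable, as
# described in the ``cmap`` docstring above:
#
#     fig = plot_evoked_image(evoked, clim=dict(eeg=[-10, 10]),
#                             cmap=('RdBu_r', True))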
def _plot_update_evoked(params, bools):
"""Update the plot evoked lines."""
picks, evoked = [params[k] for k in ('picks', 'evoked')]
times = evoked.times * 1e3
projs = [proj for ii, proj in enumerate(params['projs'])
if ii in np.where(bools)[0]]
params['proj_bools'] = bools
new_evoked = evoked.copy()
new_evoked.info['projs'] = []
new_evoked.add_proj(projs)
new_evoked.apply_proj()
for ax, t in zip(params['axes'], params['ch_types_used']):
this_scaling = params['scalings'][t]
idx = [picks[i] for i in range(len(picks)) if params['types'][i] == t]
D = this_scaling * new_evoked.data[idx, :]
if params['plot_type'] == 'butterfly':
for line, di in zip(ax.lines, D):
line.set_data(times, di)
else:
ax.images[0].set_data(D)
params['fig'].canvas.draw()
def plot_evoked_white(evoked, noise_cov, show=True):
"""Plot whitened evoked response.
Plots the whitened evoked response and the whitened GFP as described in
[1]_. If a single covariance object is passed, the GFP panel (bottom)
will depict different sensor types. If multiple covariance objects are
passed as a list, the left column will display the whitened evoked
responses for each channel based on the whitener from the noise covariance
that has the highest log-likelihood. The right column will depict the
whitened GFPs based on each estimator separately for each sensor type.
Instead of the number of channels, the GFP display shows the estimated rank.
Note. The rank estimation will be printed by the logger for each noise
covariance estimator that is passed.
Parameters
----------
evoked : instance of mne.Evoked
The evoked response.
noise_cov : list | instance of Covariance | str
The noise covariance as computed by ``mne.cov.compute_covariance``.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure object containing the plot.
References
----------
.. [1] Engemann D. and Gramfort A. (2015) Automated model selection in
covariance estimation and spatial whitening of MEG and EEG
signals, vol. 108, 328-342, NeuroImage.
"""
return _plot_evoked_white(evoked=evoked, noise_cov=noise_cov,
scalings=None, rank=None, show=show)
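# A usage sketch (illustrative, assuming ``epochs`` and ``evoked`` come from
# the same recording; ``return_estimators=True`` is assumed to return the
# fitted Covariance objects ordered by log-likelihood):
#
#     import mne
#     noise_covs = mne.cov.compute_covariance(
#         epochs, tmax=0., method=['shrunk', 'empirical'],
#         return_estimators=True)
#     fig = plot_evoked_white(evoked, noise_cov=noise_covs)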
def _plot_evoked_white(evoked, noise_cov, scalings=None, rank=None, show=True):
"""Help plot_evoked_white.
Additional Parameters
---------------------
scalings : dict | None
The rescaling method to be applied to improve the accuracy of rank
estimation. If dict, it will override the following default values
(used if None)::
dict(mag=1e12, grad=1e11, eeg=1e5)
Note. These values were tested on different datasets across various
conditions. You should not need to update them.
rank : dict of int | None
Dict of ints where keys are 'eeg', 'mag' or 'grad'. If None,
the rank is detected automatically. Defaults to None. Note.
The rank estimation will be printed by the logger for each noise
covariance estimator that is passed.
"""
from ..cov import whiten_evoked, read_cov # recursive import
from ..cov import _estimate_rank_meeg_cov
import matplotlib.pyplot as plt
if scalings is None:
scalings = dict(mag=1e12, grad=1e11, eeg=1e5)
ch_used = [ch for ch in ['eeg', 'grad', 'mag'] if ch in evoked]
has_meg = 'mag' in ch_used and 'grad' in ch_used
if isinstance(noise_cov, string_types):
noise_cov = read_cov(noise_cov)
if not isinstance(noise_cov, (list, tuple)):
noise_cov = [noise_cov]
proc_history = evoked.info.get('proc_history', [])
has_sss = False
if len(proc_history) > 0:
# if SSS has been applied, mags and grads are no longer independent
# for correct display of the whitening we will drop the cross-terms
# (the gradiometer * magnetometer covariance)
has_sss = 'max_info' in proc_history[0] and has_meg
if has_sss:
logger.info('SSS has been applied to data. Showing mag and grad '
'whitening jointly.')
evoked = evoked.copy() # handle ref meg
passive_idx = [idx for idx, proj in enumerate(evoked.info['projs'])
if not proj['active']]
# projs are either already applied or pending; drop the pending ones
for idx in passive_idx[::-1]: # reverse order so idx does not change
evoked.del_proj(idx)
picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
evoked.pick_channels([evoked.ch_names[k] for k in picks])
# important to re-pick. will otherwise crash on systems with ref channels
# as first sensor block
picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
picks_list = _picks_by_type(evoked.info, meg_combined=has_sss)
if has_meg and has_sss:
# reduce ch_used to combined mag grad
ch_used = list(zip(*picks_list))[0]
# order pick list by ch_used (required for compat with plot_evoked)
picks_list = [x for x, y in sorted(zip(picks_list, ch_used))]
n_ch_used = len(ch_used)
# make sure we use the same rank estimates for GFP and whitening
rank_list = []
for cov in noise_cov:
rank_ = {}
C = cov['data'].copy()
picks_list2 = [k for k in picks_list]
if rank is None:
if has_meg and not has_sss:
picks_list2 += _picks_by_type(evoked.info,
meg_combined=True)
for ch_type, this_picks in picks_list2:
this_info = pick_info(evoked.info, this_picks)
idx = np.ix_(this_picks, this_picks)
this_rank = _estimate_rank_meeg_cov(C[idx], this_info,
scalings)
rank_[ch_type] = this_rank
if rank is not None:
rank_.update(rank)
rank_list.append(rank_)
evokeds_white = [whiten_evoked(evoked, n, picks, rank=r)
for n, r in zip(noise_cov, rank_list)]
def whitened_gfp(x, rank=None):
"""Whitened Global Field Power.
The MNE inverse solver assumes zero mean whitened data as input.
Therefore, a chi^2 statistic will be best to detect model violations.
"""
return np.sum(x ** 2, axis=0) / (len(x) if rank is None else rank)
# prepare plot
if len(noise_cov) > 1:
n_columns = 2
n_extra_row = 0
else:
n_columns = 1
n_extra_row = 1
n_rows = n_ch_used + n_extra_row
fig, axes = plt.subplots(n_rows,
n_columns, sharex=True, sharey=False,
figsize=(8.8, 2.2 * n_rows))
if n_columns > 1:
suptitle = ('Whitened evoked (left, best estimator = "%s")\n'
'and global field power '
'(right, comparison of estimators)' %
noise_cov[0].get('method', 'empirical'))
fig.suptitle(suptitle)
if any(((n_columns == 1 and n_ch_used == 1),
(n_columns == 1 and n_ch_used > 1),
(n_columns == 2 and n_ch_used == 1))):
axes_evoked = axes[:n_ch_used]
ax_gfp = axes[-1:]
elif n_columns == 2 and n_ch_used > 1:
axes_evoked = axes[:n_ch_used, 0]
ax_gfp = axes[:, 1]
else:
raise RuntimeError('Wrong axes inputs')
times = evoked.times * 1e3
titles_ = _handle_default('titles')
if has_sss:
titles_['meg'] = 'MEG (combined)'
colors = [plt.cm.Set1(i) for i in np.linspace(0, 0.5, len(noise_cov))]
ch_colors = {'eeg': 'black', 'mag': 'blue', 'grad': 'cyan',
'meg': 'steelblue'}
iter_gfp = zip(evokeds_white, noise_cov, rank_list, colors)
if not has_sss:
evokeds_white[0].plot(unit=False, axes=axes_evoked,
hline=[-1.96, 1.96], show=False)
else:
for ((ch_type, picks), ax) in zip(picks_list, axes_evoked):
ax.plot(times, evokeds_white[0].data[picks].T, color='k')
for hline in [-1.96, 1.96]:
ax.axhline(hline, color='red', linestyle='--')
# Now plot the GFP
for evoked_white, noise_cov, rank_, color in iter_gfp:
i = 0
for ch, sub_picks in picks_list:
this_rank = rank_[ch]
title = '{0} ({2}{1})'.format(
titles_[ch] if n_columns > 1 else ch,
this_rank, 'rank ' if n_columns > 1 else '')
label = noise_cov.get('method', 'empirical')