#!/usr/bin/env python
# -*- py-which-shell: "python3"; -*-
# python -m doctest -v mgs_task.py
from __future__ import division
import numpy
import math
import numpy.matlib
import numpy.random
from psychopy import visual, core, event, logging
import glob
import re
import os
import sys
import pandas
import pickle
import winmute
import datetime
from showCal import showCal
def vdate_str():
"""
return YYYYMMDD format for right now
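the date itself varies, so doctests only check the shape:
>>> len(vdate_str())
8
>>> vdate_str().isdigit()
True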
"""
datestr = datetime.datetime.strftime(datetime.datetime.now(), "%Y%m%d")
return(datestr)
def getSubjectDataPath(subjid, tasktype, imgset, timepoint):
"""
generate (and create) a path to save a subject's visit data
directory for subject and task like "subj_info/10931/01/eeg_A_20180221"
"""
# remove date from id if it is the last bit ("xxxx_YYYYMMDD" -> "xxxx")
vdate = vdate_str()
subjid = re.sub("_%s$" % vdate, "", subjid)
# subj_info/subj/timepoint/modality_set_date/
savepath = 'subj_info'
tpdir = "%02d" % int(timepoint)
lastdir = "%s_%s_%s" % (tasktype, imgset, vdate)
datadir = os.path.join(savepath, subjid, tpdir, lastdir)
logdir = os.path.join(datadir, 'log')
for thisoutdir in [savepath, datadir, logdir]:
if not os.path.exists(thisoutdir):
os.makedirs(thisoutdir)
return((datadir, logdir))
def getInfoFromDataPath(datadir):
"""
get subject info from path
expects os.path.dirname(pkl_file)
from subj_info/abcd/01/eeg_mgsenc-B_20180221/runs_info.pkl
to ("abcd", "eeg", "B", 1)
"""
print(datadir)
# match to subj_info directory with
# dir delimiter like linux (/) or windows (\, escaped as \\\)
rm_str = ".*subj_info[/\\\\]"
print(rm_str)
justdir = re.sub(rm_str, "", datadir)
(subjid_timepoint, taskinfo) = os.path.split(justdir)
(subjid, timepoint) = os.path.split(subjid_timepoint)
(tasktype, imgset, vdate) = taskinfo.split("_")
imgset = re.sub('mgsenc-', '', imgset) # mgsenc-A into A
timepoint = int(timepoint)
return((subjid, tasktype, imgset, timepoint))
# this causes some artifacts!?
def take_screenshot(win, name):
if not os.path.exists('screenshots/'):
os.mkdir('screenshots')
win.getMovieFrame() # Defaults to front buffer, I.e. what's on screen now
win.saveMovieFrames('screenshots/' + name + '.png')
def eventToTTL(event, side, catagory):
"""
make trigger from event, side, and category
event inc in 50: (50-200: cue,img,isi,mgs)
category inc in 10 (10->30: None,Outdoor,Indoor)
side inc in 1 (1->4: Left -> Right)
61 == cue:None,Left
234 == mgs:Indoor,Right
iti,start,end hardcoded => 254,128,129
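doctest examples derived from the tables below:
>>> eventToTTL('cue', 'Left', 'None')
61
>>> eventToTTL('mgs', 'Right', 'Indoor')
234
>>> eventToTTL('iti', None, None)
254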
"""
if event == 'iti':
return(254)
if event == 'start':
return(128)
if event == 'end':
return(129)
# ttl codes are a composite of the event, image side, and category
# allow unspecified triggers as 0
# outside of 0, range is 61 (cue:None,Left) to 234 (mgs:Indoor,Right)
# cues < 100 (61 -> 84); img < 150 (111 -> 134);
# isi < 200 ( 161 -> 184); mgs < 250 (211->234)
event_dict = {'bad': 0, 'cue': 50, 'img': 100, 'isi': 150, 'mgs': 200}
ctgry_dict = {'bad': 0, 'None': 10, 'Outdoor': 20, 'Indoor': 30}
side_dict = {'bad': 0, 'Left': 1,
'NearLeft': 2, 'NearRight': 3, 'Right': 4}
composite = event_dict.get(event, 0) + \
side_dict.get(side, 0) + \
ctgry_dict.get(catagory, 0)
return(composite)
def center_textbox(textbox):
"""
center textbox in 'norm' units
"""
tw = textbox.boundingBox[0]
ww = float(textbox.win.size[0])
textbox.pos = (-tw/ww, 0)
def read_timing(onsetprefix):
"""
read onset files matching a prefix pattern; '*1D' is appended to it
keys of the returned onset dict come from the file names alone
(directory prefix and '.1D' extension are stripped)
use with parse_onsets()
# input looks like
with open('stims/example_0001_01_cue.1D','w') as f:
f.write(" ".join(["%.02f:dur" % x
for x in numpy.cumsum(.5+numpy.repeat(2,10) ) ]));
stims/4060499668621037816/
dly.1D
mgs.1D
vgs_Left_Indoor.1D
vgs_Left_None.1D
vgs_Left_Outdoor.1D
vgs_Right_Indoor.1D
vgs_Right_None.1D
vgs_Right_Outdoor.1D
"""
onsetdict = {}
onsetfiles = glob.glob(onsetprefix + '*1D')
if(len(onsetfiles) <= 0):
msg = 'no onset files in %s' % onsetprefix
raise Exception(msg)
for onset1D in onsetfiles:
# key name will be file name but
# remove the last 3 chars (.1D) and the glob part
# onsettype = onset1D[:-3].replace(onsetprefix, '')
onsettype = os.path.basename(onset1D)[:-3]
with open(onset1D) as f:
onsetdict[onsettype] = [float(x.split(':')[0])
for x in f.read().split()]
return(onsetdict)
def shuf_for_ntrials(vec, ntrials):
'''
shuf_for_ntrials creates a shuffled vector
repeated to match the number of trials
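output order is random, so doctests check only length/emptiness:
>>> len(shuf_for_ntrials([1, 2, 3], 5))
5
>>> shuf_for_ntrials([], 5)
[]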
'''
nitems = len(vec)
if nitems == 0 or ntrials == 0:
return([])
items_over = ntrials % nitems
nfullvec = int(math.floor(ntrials/nitems))
# have 3 items want 5 trials
# nfullvec=1; items_over=2
# repeat full vector as many times as we can
mat = numpy.matlib.repmat(vec, 1, nfullvec).flatten()
# then add a truncated shuffled vector as needed
if items_over > 0:
numpy.random.shuffle(vec)
mat = numpy.append(mat, vec[:items_over])
numpy.random.shuffle(mat)
return(mat)
def wait_until(stoptime, maxwait=30):
"""
just like core.wait, but instead of waiting a duration
we wait until a stoptime.
optional maxwait (default 30 seconds) raises an error if we would
be waiting too long, so we don't get stuck
"""
if stoptime - core.getTime() > maxwait:
raise ValueError("request to wait until stoptime is more than "
"%s seconds away; specify maxwait to avoid this error" % maxwait)
# will hog cpu -- no pyglet.media.dispatch_events here
while core.getTime() < stoptime:
continue
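# hypothetical usage: schedule a flip against an absolute deadline
#   onset = core.getTime() + 1.5
#   stim.draw()
#   wait_until(onset)  # busy-waits; tighter than core.wait(duration)
#   win.flip()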
def response_should_be(pos, accept_keys):
"""
evaluate known/unknown and left/right based on position and accept_keys
position==nan means it was never seen
neg. position is left, positive position is right
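doctest, with a key mapping shaped like mgsTask's default accept_keys:
>>> keys = {'known': '1', 'unknown': '0', 'oops': '5', 'Left': '1',
... 'NearLeft': '2', 'NearRight': '9', 'Right': '9'}
>>> response_should_be(-1.0, keys)
('1', '1')
>>> response_should_be(.5, keys)
('1', '9')
>>> response_should_be(float('nan'), keys)
('0', '5')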
"""
# are we "near" left or right
if(pos != round(pos, 0)):
near_str = 'Near'
else:
near_str = ''
if(math.isnan(pos)):
return((accept_keys['unknown'], accept_keys['oops']))
elif(pos < 0):
return((accept_keys['known'], accept_keys[near_str + 'Left']))
elif(pos > 0):
return((accept_keys['known'], accept_keys[near_str + 'Right']))
else:
raise ValueError('bad pos?! how!?')
# we could img.units='deg', but that might complicate testing on diff screens
def ratio(screen, image, scale):
return(float(screen) * scale/float(image))
def replace_img(img, filename, horz, imgpercent=.04, defsize=(225, 255), vertOffset=0):
'''
replace_img adjusts the image and position of a psychopy.visual.ImageStim
'''
# set image, get props
if filename is not None:
img.image = filename
(iw, ih) = img._origSize
else:
(iw, ih) = defsize
(sw, sh) = img.win.size
img.units = 'pix' # was 'pixel' in psychopy2
# resize img
scalew = ratio(sw, iw, imgpercent)
# scaleh= ratio(sh,ih,imgpercent)
# scale evenly in relation to x-axis
# img.size=(scalew*iw,scalew*sw/sh*ih) # if units were 'norm'
img.size = (scalew*iw, scalew*ih) # square pixels
# img._requestedSize => (80,80) if imgprecent=.1*sw=800
# # position
winmax = sw/float(2)
# horz=-1 => -400 for 800 wide screen
horzpos = horz*winmax
halfimgsize = scalew*iw/2.0
# are we partially off the screen? max edges perfect
if horzpos - halfimgsize < -winmax:
horzpos = halfimgsize - winmax
elif horzpos + halfimgsize > winmax:
horzpos = winmax - halfimgsize
# where to show the image
vertpos = (vertOffset)*sh/2.0
# set
img.pos = (horzpos, vertpos)
# # draw if we are not None
if filename is not None:
img.draw()
return(img.pos)
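# hypothetical usage (image path made up): far-left image at 15% of
# screen width, with the dot parked on top of it, as vgs_show() does:
#   pos = replace_img(img, 'img/A/inside/example.png', -1, .15)
#   crcl.pos = pos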
def parse_onsets(onsetsprefix):
"""
read in all files matching a glob
return a by trial dataframe
"""
onsets = read_timing(onsetsprefix)
# first event is 'cue'. sort onsets dict by first onset time. pick first
firstevent = sorted([(k, min(v)) for k, v in onsets.items()],
key=lambda x: x[1])[0][0]
if len(onsets) < 1:
raise Exception("nothing to do! No onsets parsed from %s" %
onsetsprefix)
# make long format data frame
d = pandas.DataFrame([[k, t] for k, v in onsets.items() for t in v])
d.columns = ['event', 'onset']
# set trial numbers
d = d.sort_values('onset')
d['trial'] = numpy.nan
startrows = d.event.str.startswith(firstevent)
tnums = [x + 1 for x in range(len(d[startrows]))]
d.loc[startrows, 'trial'] = tnums
# merge with info about vgs
vgssplit = [[x] + x.split('_')[1:]
for x in onsets.keys()
if re.match('vgs', x)]
if len(vgssplit) < 2:
raise Exception("bad vgs onsets from %s" % onsetsprefix)
# merge vgs array split with timing, name columns,
# fill forward (so dly and mgs get side, image type, number)
# remove all but first 3 chars of the event name
df = pandas.DataFrame(vgssplit).\
rename(columns={0: 'event', 1: 'side', 2: 'imgtype'}).\
merge(d, how='outer').\
sort_values(by='onset').\
fillna(method='ffill').\
assign(event=lambda x: [s[0:3] for s in x.event])
# cue comes before vgs, so its side/imgtype are all messed up:
# the previous forward fill pulled values from the prior trial.
# fix that with a back fill
df.loc[df.event == 'cue', ['side', 'imgtype']] = numpy.NaN
df = df.fillna(method='bfill')
# trial wide format
trialdf = df.\
set_index(['trial', 'event', 'side', 'imgtype']).\
unstack('event').\
reset_index()
# reset columns
trialdf.columns = [i[1] if i[0] == 'onset' else i[0]
for i in trialdf.columns]
return(trialdf)
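# e.g. parse_onsets('stims/mri/135154167238784597/*') should give one row
# per trial with columns like trial, side, imgtype and one onset column per
# event, names truncated to 3 chars ('cue', 'dly', 'mgs', 'vgs')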
def gen_imagedf(path_dict):
"""
find images and label to be merged with task
input is dict {'label: ['path/to/globimgs','path2'],..}
- label matches vgs_ file names
output is "imgtype","image" dataframe
- imgtype names matches parse_onsets
>>> ();run_data=gen_run_info(3, None, 'A', task='mri') # doctest:+ELLIPSIS
(...
>>> # have 16 images per run, plus a bunch of 'None' images
>>> [len(x.query("imgfile==imgfile")) for x in run_data['run_timing'] ]
[16, 16, 16]
>>> img_trial = run_data['imagedf'].merge(
... pandas.concat(run_data['run_timing']),
... on='imgfile', how='left')
>>> # everything with a trial is used
>>> len(img_trial.query('trial==trial and not used'))
0
"""
# path_dict={'Inside': ['SUN/circle_select/inside/*png'],
# 'Outside': ['SUN/circle_select/outside_man/*png',
# 'SUN/circle_select/outside_nat/*png',
# ]
# }
# go through each label and the files that match all
# patterns provided
labeled_image_list = []
for label, paths in path_dict.items():
for p in paths:
subdir = os.path.basename(os.path.dirname(p))
for img in glob.glob(p):
labeled_image_list.append([label, img, subdir])
print(path_dict.items())
df = pandas.DataFrame(labeled_image_list)
df.columns = ['imgtype', 'imgfile', 'subtype']
df['used'] = False
return(df)
def gen(imgset='A', timingglob='stims/mri/135154167238784597/*'):
"""
this is here for example usage. probably not called by anything
"""
path_dict = {'Indoor': ['img/' + imgset + '/inside/*png'],
'Outdoor': ['img/' + imgset + '/outside_man/*png',
'img/' + imgset + '/outside_nat/*png',
]}
imagedf = gen_imagedf(path_dict)
trialdf = parse_onsets(timingglob)
(imagedf, trialdf) = gen_stimlist_df(imagedf, trialdf)
return(imagedf, trialdf)
def pick_n_from_group(x, cnts):
have_n = len(x)
imgtype = x.imgtype.iloc[0]
print('looking at %s' % imgtype)
want_n = cnts.get(imgtype, 0)
print('want %d' % want_n)
take_n = min([want_n, have_n])
return(x.sample(take_n))
def dist_total_into_n(total, n):
"""
@param: total - total to be distributed
@param: n - number of elements
@return: n length array that sums to total
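result is shuffled, so doctests compare sorted:
>>> sorted(dist_total_into_n(10, 3))
[3, 3, 4]
>>> sorted(dist_total_into_n(10, 4))
[2, 2, 3, 3]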
"""
if n == 0 or total == 0:
return([])
# spread as evenly as possible: the first total % n elements get one extra
base, rem = int(total) // int(n), int(total) % int(n)
arr = [base + 1] * rem + [base] * (n - rem)
if numpy.sum(arr) != total:
raise ValueError('total %d not matched in %s' % (total, arr))
numpy.random.shuffle(arr)
return(arr)
def gen_stimlist_df(imagedf, trialdf):
"""
given a trial df and an image df,
assign an image file to each trial; returns (imagedf, trialdf)
N.B. operations on imagedf are inplace as well as returned!
"""
# dataframe gets a new colum for the image file
trialdf['imgfile'] = None
# go through each event type
eventtypes = pandas.unique(sorted(trialdf.imgtype))
for thisevent in eventtypes:
searchstr = 'imgtype == "%s"' % thisevent
idx = trialdf.query(searchstr).index
needn = len(idx)
if(needn <= 0):
print("WARNING: need 0 trials of %s?! how is that possible?" %
thisevent)
img_aval = imagedf.query(searchstr + " & used == False")
if len(img_aval) < needn:
if thisevent == 'None':
continue
# TODO if len > 0 resample some
print("WARNING: %s: not enough images (need %d > have %d)" %
(searchstr, needn, len(img_aval)))
else:
# within the trial type there may be a subtype (outside_man, outside_nat)
subtypes = pandas.unique(img_aval.subtype)
if len(subtypes) <= 1:
# this condition is not needed: the branch below would work even for n=1
# left for clarity
this_samp = img_aval.sample(needn)
trialdf.loc[idx, 'imgfile'] = this_samp.imgfile.values
# print('%s: setting %d to used' % (subtypes[0], len(this_samp.index)))
imagedf.loc[this_samp.index, 'used'] = True
else:
# if we have 10 trials and 2 types
# needns will be [5, 5]
needns = dist_total_into_n(needn, len(subtypes))
# shuffle the index of trialdf rows that need this imagetype
# so we can assign images to random trials (within imagetype)
idx_shuf = list(idx)
numpy.random.shuffle(idx_shuf)
starti = 0
# print('dist subtypes: %s for %s' % (needns,subtypes))
for i in range(len(subtypes)):
# use index to get values
# starti-endi is what indexes to pull from trialdf idx
thissubtype = subtypes[i]
num = needns[i]
endi = starti+num
# narrow what images we take
img_aval = imagedf.query(searchstr + " & subtype == @thissubtype & used == False")
# what if we don't have enough images!?
if len(img_aval) < num:
print("WARNING: %s+%s: not enough images (need %d > have %d)"
% (thisevent, thissubtype, num, len(img_aval)))
# sample what we have; any unmatched trials keep imgfile None
num = min(num, len(img_aval))
intoidx = idx_shuf[starti:starti + num]
# sample the available images, mark as used
this_samp = img_aval.sample(num)
usethese = this_samp.imgfile.values
trialdf.loc[intoidx, 'imgfile'] = usethese
imagedf.loc[this_samp.index, 'used'] = True
# update the new starti for trialdf idx
starti = endi
return(imagedf, trialdf)
def msg_screen(win, textbox, msg='no message given', pos=(0, 0)):
textbox.pos = pos
textbox.text = msg
textbox.draw()
win.flip()
core.wait(.4)
event.waitKeys()
def create_window(fullscr, screen=0):
""" create window either fullscreen or 800,600
hide mouse cursor and make active
"""
# setup screen
if fullscr:
win = visual.Window(fullscr=fullscr, screen=screen)
else:
win = visual.Window([800, 600])
win.winHandle.activate() # make sure the display window has focus
win.mouseVisible = False # and that we don't see the mouse
# -- change color to black --
win.color = (-1, -1, -1)
# flip twice to get the color
win.flip()
win.flip()
return(win)
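# e.g. win = create_window(False) gives a windowed 800x600 stage for testing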
def double_size(vec, scale=2):
"""scale each element of a size vector; fixed fallback if not iterable"""
try:
return [x * scale for x in vec]
except TypeError:
return [.2, .2]
class mgsTask:
# initialize all the components we need
def __init__(self,
win,
accept_keys={'known': '1',
'maybeknown': '2',
'maybeunknown': '9',
'unknown': '0',
'Left': '1',
'NearLeft': '2',
'NearRight': '9',
'Right': '9',
'oops': '5'},
vertOffset=0,
ET_type=None,
usePP=False,
fullscreen=True,
pp_address=0xD010, #0xDFF8, # updated 20220614 new eeg
zeroTTL=True,
recVideo=False):
# compensate for midway pause
self.addTime = 0
# were we given a window?
# make our own if not
if win is None:
win = create_window(fullscreen)
# settings for eyetracking and parallel port ttl (eeg)
# thisscript=os.path.abspath(os.path.dirname(__file__))
# self.vpxDll = os.path.join(thisscript,"VPX_InterApp.dll")
#self.vpxDll = 'C:\\Users\\Public\\Desktop\\tasks\\EyeTracking_ViewPointConnect\\VPX_InterApp.dll'
self.vpxDll = 'C:\\Users\\Luna\\Desktop\\VPx32-Client\\VPX_InterApp_32.dll'
# 20221212- 64bit dll with 32bit python errors:
# OSError: [WinError 193] %1 is not a valid Win32 application
#self.vpxDll = 'C:\\Users\\Luna\\Desktop\\VPx64-Client\\VPX_InterApp_64.dll'
#self.vpxDll = 'C:\Users\Luna\Desktop\VPx32\Interfaces\Programing\SDK\\VPX_InterApp_32.dll'
self.usePP = usePP
# ## eyetracking -- updated later if used
self.vpx = None
self.eyelink = None
self.ET_type = ET_type
# # parallel port triggers or eyetracking
if self.ET_type == "arrington":
self.init_vpx()
elif self.ET_type == "pylink":
from pylink_help import eyelink
self.eyelink = eyelink(win.size)
if self.usePP:
self.pp_address = pp_address
self.zeroTTL = zeroTTL
self.init_PP()
# settings for parallel port
# see also 0x03BC, LPT2 0x0278 or 0x0378, LPT 0x0278
#self.pp_address = 0x0378
#self.pp_address = 0x0278
#self.pp_address = 0xDFF8 # EEG
# self.pp_address = 0x0378 # ASL practice
# want to mute windows computer
# so monitor switching doesn't beep
self.winvolume = winmute.winmute()
self.verbose = True
# how far off the horizontal do we display cross and images?
self.vertOffset = vertOffset
# do we tell arrington to record eye video?
self.recVideo = recVideo
self.runEyeName = datetime.datetime.strftime(
datetime.datetime.now(),
"unnamed_%Y%m%d_%H%M%S.avi")
# images relative to screen size
self.imgratsize = .15
# window and keys
self.win = win
self.accept_keys = accept_keys
# allocate screen parts
self.img = visual.ImageStim(win, name="imgdot", interpolate=True)
self.crcl = visual.Circle(win, units='pix', radius=10, lineColor=None,
fillColor='yellow', name="circledot")
# ,AutoDraw=False)
self.crcl.units = 'pix'
# instruction eyes image
# for draw_instruction_eyes(self,
self.eyeimg = visual.ImageStim(win, name="eye_img_instructions",
interpolate=True)
self.eyeimg.image = 'img/instruction/eyes_center.png'
self.eyeimg.pos = (0, -.9)
# instructions overview
self.imgoverview = visual.ImageStim(win, name="eye_img_overview",
interpolate=True)
self.imgoverview.image = 'img/instruction/overview.png'
self.timer = core.Clock()
# could have just one and change the color
self.iti_fix = visual.TextStim(win, text='+', name='iti_fixation',
color='white', bold=True)
self.isi_fix = visual.TextStim(win, text='+', name='isi_fixation',
color='yellow', bold=True)
self.cue_fix = visual.TextStim(win, text='+', name='cue_fixation',
color='royalblue', bold=True)
# double size
# in psychopy2 size was 1, now is [.1,.1]
# and new size is reasonable! (matches old double) so
# this doesn't actually do anything (scale=1)
self.iti_fix.size = double_size(self.iti_fix.size, 1)
self.isi_fix.size = double_size(self.isi_fix.size, 1)
self.cue_fix.size = double_size(self.cue_fix.size, 1)
self.textbox = visual.TextStim(win, text='**', name='generic_textbox',
alignHoriz='left', color='white',
wrapWidth=2)
# if we are mr and want the horizontal line to have a vertical offset,
# need to increase position
# .5 is center
self.iti_fix.pos[1] = self.vertOffset
self.isi_fix.pos[1] = self.vertOffset
self.cue_fix.pos[1] = self.vertOffset
# # for quiz
self.text_KU = visual.TextStim(win,
text='seen:\nyes, maybe yes | maybe no, no',
name='KnownUnknown',
alignHoriz='center',
color='white',
height=.07,
wrapWidth=2,
pos=(-0.2, -.75))
# self.text_KU.units = 'pixels'
# self.text_KU.size = 8
self.text_LR = visual.TextStim(win,
text='side:\nfar left, mid left | mid right, far right',
name='LeftRight',
alignHoriz='center',
color='white',
height=0.07,
wrapWidth=2,
pos=(-0.2, -.75))
# self.text_LR.units = 'pixels'
# self.text_LR.size = 8
# for recall only:
# tuplet of keys and text: like ('1', 'text after pushed')
self.dir_key_text = [
(self.accept_keys['Left'], 'left'),
(self.accept_keys['NearLeft'], ' left'),
(self.accept_keys['NearRight'], 'right '),
(self.accept_keys['Right'], ' right'),
(self.accept_keys['oops'], ' oops ')
]
self.known_key_text = [
(self.accept_keys['known'], 'known'),
(self.accept_keys['maybeknown'], 'known'),
(self.accept_keys['maybeunknown'], 'unknown'),
(self.accept_keys['unknown'], 'unknown')
]
# show side
self.recall_sides = [visual.Circle(win, radius=10, lineColor=None,
fillColor='yellow',
units='pix',
name="recall_dot%d" % x)
for x in range(4)]
self.recall_txt = [visual.TextStim(win, text=str(x),
color='black',
units='pix',
name='recall_%d' % x)
for x in range(4)]
def eyetracking_newfile(self, fname):
# start a new file and pause it
if self.vpx:
fname = str(fname)
# setup for eye recording video
self.runEyeName = fname.replace(".txt", "")
self.vpx_send('dataFile_Pause 1')
self.vpx_send('dataFile_NewName "%s"' % fname)
if self.verbose:
print("tried to open eyetracking file %s" % fname)
self.vpx_send('say "newfile %s"' % fname)
if self.eyelink:
savename = fname[0:8]
if savename != fname:
# save as month-day-hour-minute (e.g. 12311259) if name is too long;
# will fetch later using receiveDataFile()
# TODO: consider
# len(str(hex(4212312356))[2:]) include 2digit year as hex
# turns into 9 digits after 2042
# could safely include seconds instead of year
# or different encoding that uses all letters base 36?
# numpy.base_repr(xxxxxx, 36)
# len(base_repr(991231235959,36)) == 8 (2dig year)
# len(base_repr(int(datetime.datetime(2099,12,31,23,59,59).strftime("%s")),36)) == 7
savename = datetime.datetime.strftime(datetime.datetime.now(), "%m%d%H%M")
self.eyelink.open(savename)
if self.verbose:
print("open eyetracking file with truncated name '%s'" %
savename)
def start_aux(self):
"""
start eyetracking, send start ttl to parallel port
"""
self.winvolume.mute_all()
if self.usePP:
self.send_code('start', None, None)
# causes 10ms delay
if self.vpx:
self.vpx_send('dataFile_Pause 0')
if self.recVideo:
print("send eyeMoive_NewName cmd")
self.vpx_send('eyeMovie_NewName "%s.avi"' %
self.runEyeName)
elif self.eyelink:
self.eyelink.start()
def stop_aux(self):
"""
stop eyetracking file, send end ttl to parallel port
"""
if self.usePP:
self.send_code('end', None, None)
# causes 10ms delay
if self.vpx:
self.vpx_send('dataFile_Close 0')
if self.recVideo:
print("send end movie cmd")
self.vpx_send('eyeMovie_Close')
elif self.eyelink:
self.eyelink.stop()
# self.winvolume.undo_mute() # causes error
self.winvolume.unmute_all()
def init_vpx(self):
if not hasattr(self, 'vpx') or not self.vpx:
from ctypes import cdll, CDLL
# vpxDll="C:/ARI/VP/VPX_InterApp.dll"
if not os.path.exists(self.vpxDll):
raise Exception('cannot find eyetracking dll @ ' + self.vpxDll)
print("# avotech dll: " + self.vpxDll)
cdll.LoadLibrary(self.vpxDll)
self.vpx = CDLL(self.vpxDll)
if self.vpx.VPX_GetStatus(1) < 1:
raise Exception('ViewPoint is not running!')
print("# VPX status: %s" % self.vpx.VPX_GetStatus(1))
self.vpx_send('say "mgs_task is connected"')
def vpx_send(self, cmd):
"""
VPX needs ascii encoding. py3 default is utf8
2023-01-13 @ 7T with new comp"""
return self.vpx.VPX_SendCommand(cmd.encode('ascii'))
def init_PP(self):
# TODO: TEST SOMEWHERE
if self.usePP:
# initialize parallel port
if not hasattr(self, 'port'):
# might need to 'pip install pyparallel'
from psychopy import parallel
# address is hex for windows, but str for linux
#print("using port: %x" % self.pp_address)
print(f"using port: {self.pp_address}")
self.port = parallel.ParallelPort(address=self.pp_address)
def log_and_code(self, event, side, catagory, logh=None, takeshots=None,
num=1, trialno=None):
self.send_code(event, side, catagory, trialno)
if logh is not None:
logh.log(level=logging.INFO,
msg='flipped %s (%s,%s)' % (event, side, catagory))
if takeshots:
take_screenshot(self.win, takeshots + ('_%02d_%s' % (num, event)))
def send_code(self, event, side, catagory, trialno=None):
"""
send a trigger on parallel port (eeg) or ethernet (eyetracker)
in MR, we do eyetracking, and want to send a trigger to the tracker
in EEG, we don't have eye tracking, but want to annotate screen flips
"""
# see also: vpx.VPX_GetStatus(VPX_STATUS_ViewPointIsRunning) < 1
if self.usePP:
# send code, or 100 if cannot find
thistrigger = eventToTTL(event, side, catagory)
self.send_ttl(thistrigger)
if self.ET_type in ['arrington', 'pylink']:
# if we have a trialno, include it in the output
if trialno is not None:
cat_t = "%s_%d" % (catagory, trialno)
else:
cat_t = catagory
ttlstr = "_".join(map(lambda x: "%s" % x, [event, side, cat_t]))
self.set_et_event(ttlstr)
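# e.g. send_code('cue', 'Left', 'None', trialno=1) sends TTL 61 (see
# eventToTTL) over the parallel port and/or inserts "cue_Left_None_1"
# into the eyetracker data file, depending on usePP/ET_type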
def set_et_event(self, ttlstr):
"""
set eyetracking event to either arrington or eyelink
"""
if self.vpx:
self.vpx_send('dataFile_InsertString "%s"' % ttlstr)
elif self.eyelink:
self.eyelink.trigger(ttlstr)
# report what we did if verbose
if self.verbose:
print("eye code %s" % ttlstr)
if self.vpx:
self.vpx_send('say "sent %s"' % ttlstr)
def send_ttl(self, thistrigger):
"""
send ttl trigger to parallel port (setup by init_PP)
wait 10ms and send 0
"""
thistrigger = int(thistrigger)
self.port.setData(thistrigger)
if self.verbose:
print("eeg code %s" % thistrigger)
if self.zeroTTL:
core.wait(.01) # wait 10ms and send zero
self.port.setData(0)
def wait_for_scanner(self, trigger, msg='Waiting for scanner (%s trigger)'):
"""
wait for scanner trigger press
start any auxiliary things (eyetracking for mri, ttl for eeg)
return time of keypush
"""
self.textbox.setText(msg % trigger)
center_textbox(self.textbox)
self.textbox.draw()
self.win.flip()
event.waitKeys(keyList=trigger)
starttime = core.getTime()
self.start_aux() # eyetracking/parallel port
self.run_iti()
return(starttime)
def run_iti(self, iti=0):
"""
simple iti. flush logs
globals:
iti_fix visual.TextStim
"""
self.iti_fix.draw()
self.win.callOnFlip(self.log_and_code, 'iti', None, None)
showtime = self.win.flip()
logging.flush()
if(iti > 0):
core.wait(iti)
return(showtime)
def vgs_show(self, imgon, posstr, imgfile=None, imgtype=None, logh=None,
takeshots=False, trialno=None):
"""
run the vgs event: show an image with a dot over it in some position
"""
# set horz position from side (left,right). center if unknown
horz = {'Right': 1, 'Left': -1, 'NearLeft': -.5, 'NearRight': .5}.\
get(posstr, 0)
imgpos = replace_img(self.img, imgfile, horz, self.imgratsize,
vertOffset=self.vertOffset)
self.crcl.pos = imgpos
self.crcl.draw()
self.win.callOnFlip(self.log_and_code, 'img', posstr, imgtype,
logh, takeshots, num=2, trialno=trialno)
wait_until(imgon)
showtime = self.win.flip()
return(showtime)
def sacc_trial(self, t, starttime=0, takeshots=None, logh=None):
"""
saccade trial
globals:
win, cue_fix, isi_fix
"""
if(starttime == 0):
starttime = core.getTime()
cueon = starttime + t['cue']
imgon = starttime + t['vgs']
ision = starttime + t['dly']
mgson = starttime + t['mgs']
# if takeshots: take_screenshot(self.win,takeshots+'_00_start')
# give header for output if this is the first trial
if t.thisN == 0:
print("")
print("ideal\tcur\tlaunch\tpos\ttype\tdly\tdiff (remaning iti)\taddTime")
print("%.02f\t%.02f\t%.02f\t%s\t%s\t%.02f\t%.02f\t%.02f" %
(t['cue'],
core.getTime(),
starttime + t['cue'],
t['side'],
t['imgtype'],
t['mgs'] - t['dly'],
starttime + t['cue'] - core.getTime(),
self.addTime
))
# get ready red target
self.cue_fix.draw()
self.win.callOnFlip(self.log_and_code, 'cue', t['side'], t['imgtype'],
logh, takeshots, 1, trialno=t['trial'])
wait_until(cueon)
cueflipt = self.win.flip()
# show an image if we have one to show
vgsflipt = self.vgs_show(imgon, t['side'], t['imgfile'], t['imgtype'],
logh, takeshots, trialno=t['trial'])
# back to fix
self.isi_fix.draw()
self.win.callOnFlip(self.log_and_code, 'isi', t['side'], t['imgtype'],
logh, takeshots, 3, trialno=t['trial'])
wait_until(ision)
isiflipt = self.win.flip()
# memory guided (recall)
# -- empty screen nothing to draw
self.win.callOnFlip(self.log_and_code, 'mgs', t['side'], t['imgtype'],
logh, takeshots, 4, t['trial'])
wait_until(mgson)
mgsflipt = self.win.flip()
# ----
# N.B. after this flip we still need to wait MGS wait time
# ---
# send back all the flip times
return({'cue': cueflipt, 'vgs': vgsflipt, 'dly': isiflipt,
'mgs': mgsflipt})
# coded with wait instead of wait_until:
# # get ready
# cue_fix.draw(); win.flip(); core.wait(0.5)
# # visual guided
# replace_img(img,imgfile,horz,.05); win.flip(); core.wait(.5)
# # back to fix
# isi_fix.draw(); win.flip(); core.wait(0.5)
# # memory guided
# win.flip(); core.wait(.5)
def key_feedback(self, keys_text_tupple, feedback, timer, maxtime=1.5):
"""
record button response and reaction time
display for the same duration regardless of RT
provide feedback after push
globals:
win
"""
validkeys = [x[0] for x in keys_text_tupple]
# validkeys = ['1','2','3','4']
origtext = feedback.text
# get list of tuple (keypush,rt)
t = event.waitKeys(keyList=validkeys, maxWait=maxtime,
timeStamped=timer)